patch-2.3.99-pre6 linux/drivers/i2o/i2o_lan.c
- Lines: 1996
- Date: Wed Apr 12 09:38:53 2000
- Orig file: v2.3.99-pre5/linux/drivers/i2o/i2o_lan.c
- Orig date: Thu Mar 2 14:36:22 2000
diff -u --recursive --new-file v2.3.99-pre5/linux/drivers/i2o/i2o_lan.c linux/drivers/i2o/i2o_lan.c
@@ -1,10 +1,10 @@
/*
- * linux/drivers/i2o/i2o_lan.c
+ * drivers/i2o/i2o_lan.c
*
- * I2O LAN CLASS OSM January 7th 1999
+ * I2O LAN CLASS OSM April 3rd 2000
*
- * (C) Copyright 1999 University of Helsinki,
- * Department of Computer Science
+ * (C) Copyright 1999, 2000 University of Helsinki,
+ * Department of Computer Science
*
* This code is still under development / test.
*
@@ -14,7 +14,8 @@
* 2 of the License, or (at your option) any later version.
*
* Authors: Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
- * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
+ * Fixes: Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
+ * Taneli Vähäkangas <Taneli.Vahakangas@cs.Helsinki.FI>
* Deepak Saxena <deepak@plexity.net>
*
* Tested: in FDDI environment (using SysKonnect's DDM)
@@ -31,10 +32,12 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/fddidevice.h>
+#include <linux/trdevice.h>
+#include <linux/fcdevice.h>
+
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/malloc.h>
-#include <linux/trdevice.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/tqueue.h>
@@ -52,46 +55,62 @@
#define dprintk(s, args...)
#endif
-/* Module params */
-
-static u32 bucketpost = I2O_BUCKET_COUNT;
-static u32 bucketthresh = I2O_BUCKET_THRESH;
-static u32 rx_copybreak = 200;
+/* The following module parameters are used as default values
+ * for per interface values located in the net_device private area.
+ * Private values are changed via /proc filesystem.
+ */
+static u32 max_buckets_out = I2O_LAN_MAX_BUCKETS_OUT;
+static u32 bucket_thresh = I2O_LAN_BUCKET_THRESH;
+static u32 rx_copybreak = I2O_LAN_RX_COPYBREAK;
+static u32 tx_batch_mode = I2O_LAN_TX_BATCH_MODE;
+static u32 i2o_event_mask = I2O_LAN_EVENT_MASK;
#define MAX_LAN_CARDS 16
static struct net_device *i2o_landevs[MAX_LAN_CARDS+1];
-static int unit = -1; /* device unit number */
+static int unit = -1; /* device unit number */
-struct i2o_lan_local {
- u8 unit;
- struct i2o_device *i2o_dev;
- struct fddi_statistics stats; /* see also struct net_device_stats */
- unsigned short (*type_trans)(struct sk_buff *, struct net_device *);
- u32 bucket_count; /* nbr of buckets sent to DDM */
- u32 tx_count; /* packets in one TX message frame */
- u32 tx_max_out; /* DDM's Tx queue len */
- u32 tx_out; /* outstanding TXes */
- u32 sgl_max; /* max SGLs in one message frame */
- u32 m; /* IOP address of msg frame */
-
- struct tq_struct i2o_batch_send_task;
- struct sk_buff **i2o_fbl; /* Free bucket list (to reuse skbs) */
- int i2o_fbl_tail;
+extern rwlock_t dev_mc_lock;
- spinlock_t lock;
-};
-
-static void i2o_lan_reply(struct i2o_handler *h, struct i2o_controller *iop,
- struct i2o_message *m);
-static void i2o_lan_event_reply(struct net_device *dev, u32 *msg);
+static void i2o_lan_reply(struct i2o_handler *h, struct i2o_controller *iop, struct i2o_message *m);
+static void i2o_lan_send_post_reply(struct i2o_handler *h, struct i2o_controller *iop, struct i2o_message *m);
static int i2o_lan_receive_post(struct net_device *dev);
-static int i2o_lan_receive_post_reply(struct net_device *dev, u32 *msg);
+static void i2o_lan_receive_post_reply(struct i2o_handler *h, struct i2o_controller *iop, struct i2o_message *m);
static void i2o_lan_release_buckets(struct net_device *dev, u32 *msg);
+static int i2o_lan_reset(struct net_device *dev);
+static void i2o_lan_handle_event(struct net_device *dev, u32 *msg);
+
+/* Structures to register handlers for the incoming replies. */
+
+static struct i2o_handler i2o_lan_send_handler = {
+ i2o_lan_send_post_reply, // For send replies
+ NULL,
+ NULL,
+ NULL,
+ "I2O Lan OSM send",
+ -1,
+ I2O_CLASS_LAN
+};
+static int lan_send_context;
+
+static struct i2o_handler i2o_lan_receive_handler = {
+ i2o_lan_receive_post_reply, // For receive replies
+ NULL,
+ NULL,
+ NULL,
+ "I2O Lan OSM receive",
+ -1,
+ I2O_CLASS_LAN
+};
+static int lan_receive_context;
+
static struct i2o_handler i2o_lan_handler = {
- i2o_lan_reply,
+ i2o_lan_reply, // For other replies
+ NULL,
+ NULL,
+ NULL,
"I2O Lan OSM",
- 0, // context
+ -1,
I2O_CLASS_LAN
};
static int lan_context;
@@ -100,245 +119,264 @@
0, 0, (void (*)(void *))i2o_lan_receive_post, (void *) 0
};
+/* Functions to handle message failures and transaction errors:
+==============================================================*/
+
/*
- * i2o_lan_reply(): The only callback function to handle incoming messages.
+ * i2o_lan_handle_failure(): Fail bit has been set since IOP's message
+ * layer cannot deliver the request to the target, or the target cannot
+ * process the request.
*/
-static void i2o_lan_reply(struct i2o_handler *h, struct i2o_controller *iop,
- struct i2o_message *m)
+static void i2o_lan_handle_failure(struct net_device *dev, u32 *msg)
{
- u32 *msg = (u32 *)m;
- u8 unit = (u8)(msg[2]>>16); // InitiatorContext
- struct net_device *dev = i2o_landevs[unit];
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
- if (msg[0] & (1<<13)) { // Fail bit is set
- printk(KERN_ERR "%s: IOP failed to process the msg:\n",dev->name);
- printk(KERN_ERR " Cmd = 0x%02X, InitiatorTid = %d, TargetTid = %d\n",
- (msg[1] >> 24) & 0xFF, (msg[1] >> 12) & 0xFFF, msg[1] & 0xFFF);
- printk(KERN_ERR " FailureCode = 0x%02X\n Severity = 0x%02X\n "
- "LowestVersion = 0x%02X\n HighestVersion = 0x%02X\n",
- msg[4] >> 24, (msg[4] >> 16) & 0xFF,
- (msg[4] >> 8) & 0xFF, msg[4] & 0xFF);
- printk(KERN_ERR " FailingHostUnit = 0x%04X\n FailingIOP = 0x%03X\n",
- msg[5] >> 16, msg[5] & 0xFFF);
- return;
- }
+ u32 *preserved_msg = (u32*)(iop->mem_offset + msg[7]);
+ // FIXME on 64-bit host
+ u32 *sgl_elem = &preserved_msg[4];
+ struct sk_buff *skb = NULL;
+ u8 le_flag;
+
+// To be added to i2o_core.c
+// i2o_report_failure(KERN_INFO, iop, dev->name, msg);
+
+ /* If PacketSend failed, free sk_buffs reserved by upper layers */
+
+ if (msg[1] >> 24 == LAN_PACKET_SEND) {
+ do {
+ skb = (struct sk_buff *)(sgl_elem[1]);
+ dev_kfree_skb_irq(skb);
+
+ atomic_dec(&priv->tx_out);
+
+ le_flag = *sgl_elem >> 31;
+ sgl_elem +=3;
+ } while (le_flag == 0); /* Last element flag not set */
-#ifndef DRIVERDEBUG
- if (msg[4] >> 24) /* ReqStatus != SUCCESS */
-#endif
- i2o_report_status(KERN_INFO, dev->name, msg);
-
- switch (msg[1] >> 24) {
- case LAN_RECEIVE_POST:
- {
- if (netif_running(dev)) {
- if (!(msg[4]>>24)) {
- i2o_lan_receive_post_reply(dev,msg);
- break;
- }
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ }
- // Something VERY wrong if this is happening
- printk( KERN_WARNING "%s: rejected bucket post.\n", dev->name);
- }
+ /* If ReceivePost failed, free sk_buffs we have reserved */
- // Shutting down, we are getting unused buckets back
- i2o_lan_release_buckets(dev,msg);
-
- break;
+ if (msg[1] >> 24 == LAN_RECEIVE_POST) {
+ do {
+ skb = (struct sk_buff *)(sgl_elem[1]);
+ dev_kfree_skb_irq(skb);
+
+ atomic_dec(&priv->buckets_out);
+
+ le_flag = *sgl_elem >> 31;
+ sgl_elem +=3;
+ } while (le_flag == 0); /* Last element flag not set */
}
- case LAN_PACKET_SEND:
- case LAN_SDU_SEND:
- {
- struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
- u8 trl_count = msg[3] & 0x000000FF;
+ /* Release the preserved msg frame by resubmitting it as a NOP */
- while (trl_count) {
- // The HDM has handled the outgoing packet
- dev_kfree_skb((struct sk_buff *)msg[4 + trl_count]);
- dprintk(KERN_INFO "%s: Request skb freed (trl_count=%d).\n",
- dev->name,trl_count);
- priv->tx_out--;
- trl_count--;
- }
+ preserved_msg[0] = THREE_WORD_MSG_SIZE | SGL_OFFSET_0;
+ preserved_msg[1] = I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0;
+ preserved_msg[2] = 0;
+ i2o_post_message(iop, msg[7]);
+}
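
The cleanup loop above depends on bit 31 of each three-word SGL element acting as the last-element (LE) flag: buffers are freed until the element that carries the flag. A minimal, self-contained userspace sketch of that walk (a hypothetical element layout standing in for the preserved I2O frame and the sk_buffs):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Hypothetical stand-in for one simple SGL element: a flags+length word
 * (bit 31 = last-element flag), a transaction context (here the buffer
 * pointer itself) and a bus address, mirroring the three words walked above. */
struct sgl_elem {
	uint32_t flag_count;
	void    *context;
	uint32_t bus_addr;
};

/* Free every buffer referenced by the list, stopping after the element
 * whose LE bit is set -- the same termination rule the failure handler uses. */
static int release_sgl(struct sgl_elem *e)
{
	int freed = 0;
	uint8_t le_flag;

	do {
		free(e->context);
		freed++;
		le_flag = e->flag_count >> 31;
		e++;
	} while (le_flag == 0);

	return freed;
}

int main(void)
{
	struct sgl_elem sgl[3] = {
		{ 0x00000600, malloc(1536), 0 },
		{ 0x00000600, malloc(1536), 0 },
		{ 0x80000600, malloc(1536), 0 },	/* LE flag set */
	};

	printf("released %d buffers\n", release_sgl(sgl));
	return 0;
}
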
+/*
+ * i2o_lan_handle_transaction_error(): IOP or DDM has rejected the request
+ * for general cause (format error, bad function code, insufficient resources,
+ * etc.). We get one transaction_error for each failed transaction.
+ */
+static void i2o_lan_handle_transaction_error(struct net_device *dev, u32 *msg)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct sk_buff *skb;
+
+// To be added to i2o_core.c
+// i2o_report_transaction_error(KERN_INFO, dev->name, msg);
+
+ /* If PacketSend was rejected, free sk_buff reserved by upper layers */
+
+ if (msg[1] >> 24 == LAN_PACKET_SEND) {
+ skb = (struct sk_buff *)(msg[3]); // TransactionContext
+ dev_kfree_skb_irq(skb);
+ atomic_dec(&priv->tx_out);
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
-
- break;
- }
-
- case LAN_RESET: /* default reply without payload */
- case LAN_SUSPEND:
- break;
+ }
- case I2O_CMD_UTIL_EVT_REGISTER:
- case I2O_CMD_UTIL_EVT_ACK:
- i2o_lan_event_reply(dev, msg);
- break;
+ /* If ReceivePost was rejected, free sk_buff we have reserved */
- default:
- printk(KERN_ERR "%s: No handler for the reply.\n", dev->name);
- i2o_report_status(KERN_INFO, dev->name, msg);
+ if (msg[1] >> 24 == LAN_RECEIVE_POST) {
+ skb = (struct sk_buff *)(msg[3]);
+ dev_kfree_skb_irq(skb);
+ atomic_dec(&priv->buckets_out);
}
}
/*
- * i2o_lan_event_reply(): Handle events.
+ * i2o_lan_handle_status(): Common parts of handling a not succeeded request
+ * (status != SUCCESS).
*/
-static void i2o_lan_event_reply(struct net_device *dev, u32 *msg)
+static int i2o_lan_handle_status(struct net_device *dev, u32 *msg)
{
- struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
- struct i2o_device *i2o_dev = priv->i2o_dev;
- struct i2o_controller *iop = i2o_dev->controller;
- struct i2o_reply {
- u8 version_offset;
- u8 msg_flags;
- u16 msg_size;
- u32 tid:12;
- u32 initiator:12;
- u32 function:8;
- u32 initiator_context;
- u32 transaction_context;
- u32 evt_indicator;
- u32 evt_data[(iop->inbound_size - 20) / 4]; /* max */
- } *evt = (struct i2o_reply *)msg;
-
- int evt_data_len = (evt->msg_size - 5) * 4; /* real */
-
- if (evt->function == I2O_CMD_UTIL_EVT_REGISTER) {
- printk(KERN_INFO "%s: I2O event - ", dev->name);
-
- switch (evt->evt_indicator) {
- case I2O_EVT_IND_STATE_CHANGE:
- printk("State chance 0x%08X.\n",
- evt->evt_data[0]);
- break;
- case I2O_EVT_IND_GENERAL_WARNING:
- printk("General warning 0x%02X.\n",
- evt->evt_data[0]);
- break;
- case I2O_EVT_IND_CONFIGURATION_FLAG:
- printk("Configuration requested.\n");
- break;
- case I2O_EVT_IND_LOCK_RELEASE:
- printk("Lock released.\n");
- break;
- case I2O_EVT_IND_CAPABILITY_CHANGE:
- printk("Capability change 0x%02X.\n",
- evt->evt_data[0]);
- break;
- case I2O_EVT_IND_DEVICE_RESET:
- printk("Device reset.\n");
- break;
- case I2O_EVT_IND_EVT_MASK_MODIFIED:
- printk("Event mask modified, 0x%08X.\n",
- evt->evt_data[0]);
- break;
- case I2O_EVT_IND_FIELD_MODIFIED: {
- u16 *work16 = (u16 *)evt->evt_data;
- printk("Group 0x%04X, field %d changed.\n",
- work16[0], work16[1]);
- break;
- }
- case I2O_EVT_IND_VENDOR_EVT: {
- int i;
- printk("Vendor event:\n");
- for (i = 0; i < evt_data_len / 4; i++)
- printk(" 0x%08X\n", evt->evt_data[i]);
- break;
- }
- case I2O_EVT_IND_DEVICE_STATE:
- printk("Device state changed 0x%08X.\n",
- evt->evt_data[0]);
- break;
- case I2O_LAN_EVT_LINK_DOWN:
- printk("Link to the physical device is lost.\n");
- break;
- case I2O_LAN_EVT_LINK_UP:
- printk("Link to the physical device is (re)established.\n");
- break;
- case I2O_LAN_EVT_MEDIA_CHANGE:
- printk("Media change.\n");
- break;
- default:
- printk("Event Indicator = 0x%08X.\n",
- evt->evt_indicator);
- }
-
- /*
- * EventAck necessary only for events that cause the device
- * to syncronize with the user
- *
- *if (i2o_event_ack(iop, i2o_dev->lct_data->tid,
- * priv->unit << 16 | lan_context,
- * evt->evt_indicator,
- * evt->evt_data, evt_data_len) < 0)
- * printk("%s: Event Acknowledge timeout.\n", dev->name);
- */
- }
+ /* Fail bit set? */
+
+ if (msg[0] & MSG_FAIL) {
+ i2o_lan_handle_failure(dev, msg);
+ return -1;
+ }
+
+ /* Message rejected for general cause? */
+
+ if ((msg[4]>>24) == I2O_REPLY_STATUS_TRANSACTION_ERROR) {
+ i2o_lan_handle_transaction_error(dev, msg);
+ return -1;
+ }
- /* else evt->function == I2O_CMD_UTIL_EVT_ACK) */
- /* Do we need to do something here too? */
+ /* Else have to handle it in the callback function */
+
+ return 0;
}
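
i2o_lan_handle_status() centralises the two generic failure cases each callback checks before touching the payload: a set fail bit in the message flags means the transport layer could not deliver the request, a TRANSACTION_ERROR reply status means the DDM rejected it, and anything else is left to the caller. A stripped-down userspace sketch of that triage on a raw reply frame (bit positions taken from the code above; the status value is an assumption for the sketch):

#include <stdint.h>
#include <stdio.h>

#define MSG_FAIL_BIT               (1u << 13)	/* fail bit in msg[0]         */
#define REPLY_STATUS_SUCCESS             0x00
#define REPLY_STATUS_TRANSACTION_ERROR   0x0A	/* assumed value, sketch only */

/* Returns -1 when the frame was consumed as an error here,
 * 0 when the caller should continue processing the reply. */
static int triage_reply(const uint32_t *msg)
{
	if (msg[0] & MSG_FAIL_BIT) {
		printf("transport failure: recover the preserved frame\n");
		return -1;
	}
	if ((msg[4] >> 24) == REPLY_STATUS_TRANSACTION_ERROR) {
		printf("request rejected: free the per-transaction buffer\n");
		return -1;
	}
	return 0;	/* SUCCESS or a DetailedStatusCode the caller knows */
}

int main(void)
{
	uint32_t ok[5]   = { 0, 0, 0, 0, REPLY_STATUS_SUCCESS << 24 };
	uint32_t fail[5] = { MSG_FAIL_BIT, 0, 0, 0, 0 };

	printf("good frame -> %d\n", triage_reply(ok));
	printf("fail frame -> %d\n", triage_reply(fail));
	return 0;
}
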
+/* Callback functions called from the interrupt routine:
+=======================================================*/
+
/*
- * i2o_lan_release_buckets(): Handle unused buckets.
+ * i2o_lan_send_post_reply(): Callback function to handle PostSend replies.
*/
-static void i2o_lan_release_buckets(struct net_device *dev, u32 *msg)
+static void i2o_lan_send_post_reply(struct i2o_handler *h,
+ struct i2o_controller *iop, struct i2o_message *m)
{
- struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
- u8 trl_count = (u8)(msg[3] & 0x000000FF);
- u32 *pskb = &msg[6];
+ u32 *msg = (u32 *)m;
+ u8 unit = (u8)(msg[2]>>16); // InitiatorContext
+ struct net_device *dev = i2o_landevs[unit];
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ u8 trl_count = msg[3] & 0x000000FF;
- while (trl_count--) {
- dprintk("%s: Releasing unused sk_buff %p.\n",dev->name,
- (struct sk_buff*)(*pskb));
- dev_kfree_skb((struct sk_buff*)(*pskb));
- pskb++;
- priv->bucket_count--;
+#ifdef DRIVERDEBUG
+ i2o_report_status(KERN_INFO, dev->name, msg);
+#endif
+
+ if ((msg[4] >> 24) != I2O_REPLY_STATUS_SUCCESS) {
+ if (i2o_lan_handle_status(dev, msg))
+ return;
+
+ /* Else we get pending transmit request(s) back */
+ }
+
+ /* DDM has handled transmit request(s), free sk_buffs */
+
+ while (trl_count) {
+ dev_kfree_skb_irq((struct sk_buff *)msg[4 + trl_count]);
+ dprintk(KERN_INFO "%s: Request skb freed (trl_count=%d).\n",
+ dev->name, trl_count);
+ atomic_dec(&priv->tx_out);
+ trl_count--;
}
+
+ /* If priv->tx_out had reached tx_max_out, the queue was stopped */
+
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
}
/*
- * i2o_lan_receive_post_reply(): Process incoming packets.
+ * i2o_lan_receive_post_reply(): Callback function to process incoming packets.
*/
-static int i2o_lan_receive_post_reply(struct net_device *dev, u32 *msg)
+static void i2o_lan_receive_post_reply(struct i2o_handler *h,
+ struct i2o_controller *iop, struct i2o_message *m)
{
+ u32 *msg = (u32 *)m;
+ u8 unit = (u8)(msg[2]>>16); // InitiatorContext
+ struct net_device *dev = i2o_landevs[unit];
+
struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
struct i2o_bucket_descriptor *bucket = (struct i2o_bucket_descriptor *)&msg[6];
struct i2o_packet_info *packet;
u8 trl_count = msg[3] & 0x000000FF;
struct sk_buff *skb, *old_skb;
+ unsigned long flags = 0;
+
+#ifdef DRIVERDEBUG
+ i2o_report_status(KERN_INFO, dev->name, msg);
+#endif
+
+ if ((msg[4] >> 24) != I2O_REPLY_STATUS_SUCCESS) {
+ if (i2o_lan_handle_status(dev, msg))
+ return;
+
+ /* Getting unused buckets back? */
+
+ if (msg[4] & I2O_LAN_DSC_CANCELED ||
+ msg[4] & I2O_LAN_DSC_RECEIVE_ABORTED) {
+ i2o_lan_release_buckets(dev, msg);
+ return;
+ }
+
+ /* Which DetailedStatusCodes need special treatment? */
+ }
+
+ /* Else we are receiving incoming post. */
while (trl_count--) {
- skb = (struct sk_buff *)bucket->context;
- packet = (struct i2o_packet_info *)bucket->packet_info;
- priv->bucket_count--;
+ skb = (struct sk_buff *)bucket->context;
+ packet = (struct i2o_packet_info *)bucket->packet_info;
+ atomic_dec(&priv->buckets_out);
+#if 0
+/* Is this enough? If we get erroneous bucket, we can't assume that skb could
+ * be reused, can we?
+ */
+
+ /* Should we optimise these ifs away from the fast path? -taneli */
+ if (packet->flags & 0x0f) {
+
+ if (packet->flags & 0x01)
+ printk(KERN_WARNING "%s: packet with errors.\n", dev->name);
+ if (packet->flags & 0x0c)
+ /* This actually means that the hw is b0rken, since we
+ have asked it to not send fragmented packets. */
+ printk(KERN_DEBUG "%s: multi-bucket packets not supported!\n", dev->name);
+ bucket++;
+ if (skb)
+ dev_kfree_skb_irq(skb);
+ continue;
+ }
- if (packet->len < rx_copybreak) {
+ if (packet->status & 0xff) {
+ /* Silently discard, unless debugging. */
+ dprintk(KERN_DEBUG "%s: toasted packet received.\n", dev->name);
+ bucket++;
+ if (skb)
+ dev_kfree_skb_irq(skb);
+ continue;
+ }
+#endif
+ if (packet->len < priv->rx_copybreak) {
old_skb = skb;
- skb = (struct sk_buff *)dev_alloc_skb(packet->len+2);
+ skb = (struct sk_buff *)dev_alloc_skb(packet->len+2);
if (skb == NULL) {
- printk("%s: Can't allocate skb.\n", dev->name);
- return -ENOMEM;
- }
- skb_reserve(skb,2);
- memcpy(skb_put(skb,packet->len), old_skb->data, packet->len);
+ printk(KERN_ERR "%s: Can't allocate skb.\n", dev->name);
+ return;
+ }
+ skb_reserve(skb, 2);
+ memcpy(skb_put(skb, packet->len), old_skb->data, packet->len);
- if (priv->i2o_fbl_tail < I2O_BUCKET_COUNT)
- priv->i2o_fbl[++priv->i2o_fbl_tail] = old_skb;
+ spin_lock_irqsave(&priv->fbl_lock, flags);
+ if (priv->i2o_fbl_tail < I2O_LAN_MAX_BUCKETS_OUT)
+ priv->i2o_fbl[++priv->i2o_fbl_tail] = old_skb;
else
- dev_kfree_skb(old_skb);
+ dev_kfree_skb_irq(old_skb);
+ spin_unlock_irqrestore(&priv->fbl_lock, flags);
} else
- skb_put(skb,packet->len);
-
+ skb_put(skb, packet->len);
+
skb->dev = dev;
skb->protocol = priv->type_trans(skb, dev);
netif_rx(skb);
+ dev->last_rx = jiffies;
dprintk(KERN_INFO "%s: Incoming packet (%d bytes) delivered "
- "to upper level.\n",dev->name,packet->len);
+ "to upper level.\n", dev->name, packet->len);
bucket++; // to next Packet Descriptor Block
}
@@ -346,19 +384,193 @@
#ifdef DRIVERDEBUG
if (msg[5] == 0)
printk(KERN_INFO "%s: DDM out of buckets (priv->count = %d)!\n",
- dev->name, priv->bucket_count);
+ dev->name, atomic_read(&priv->buckets_out));
#endif
- if (priv->bucket_count <= bucketpost - bucketthresh) {
- i2o_post_buckets_task.data = (void *)dev;
- queue_task(&i2o_post_buckets_task, &tq_immediate);
- mark_bh(IMMEDIATE_BH);
- /* Note: the task is queued only once */
+ /* If DDM has already consumed bucket_thresh buckets, post new ones */
+
+ if (atomic_read(&priv->buckets_out) <= priv->max_buckets_out - priv->bucket_thresh) {
+ i2o_post_buckets_task.data = (void *)dev;
+ queue_task(&i2o_post_buckets_task, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+ }
+
+ return;
+}
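
The rx_copybreak test in the receive path is the usual copybreak trade-off: frames shorter than the threshold are copied into a right-sized buffer so the large bucket can go straight back onto the free bucket list, while larger frames hand the bucket itself upstream. A simplified userspace sketch of that decision (hypothetical buffer handling, not the driver's sk_buff management):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define COPYBREAK 200	/* assumed threshold, like the rx_copybreak default */

/* Deliver one received frame of 'len' bytes held in 'bucket'.  Returns the
 * buffer to pass to the stack; '*recycle' is set to the bucket when it can
 * be reused for a later receive post instead of being consumed. */
static void *rx_deliver(void *bucket, size_t len, void **recycle)
{
	*recycle = NULL;

	if (len < COPYBREAK) {
		void *copy = malloc(len);
		if (copy == NULL)
			return bucket;	/* fall back: hand over the bucket */
		memcpy(copy, bucket, len);
		*recycle = bucket;	/* big bucket goes back on the free list */
		return copy;
	}
	return bucket;			/* large frame: pass the bucket itself */
}

int main(void)
{
	char bucket[1514] = "small frame";
	void *recycle;
	void *up = rx_deliver(bucket, 12, &recycle);

	printf("delivered %p, bucket recycled: %s\n",
	       up, recycle ? "yes" : "no");
	if (up != bucket)
		free(up);
	return 0;
}
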
+
+/*
+ * i2o_lan_reply(): Callback function to handle other incoming messages
+ * except SendPost and ReceivePost.
+ */
+static void i2o_lan_reply(struct i2o_handler *h, struct i2o_controller *iop,
+ struct i2o_message *m)
+{
+ u32 *msg = (u32 *)m;
+ u8 unit = (u8)(msg[2]>>16); // InitiatorContext
+ struct net_device *dev = i2o_landevs[unit];
+
+#ifdef DRIVERDEBUG
+ i2o_report_status(KERN_INFO, dev->name, msg);
+#endif
+
+ if ((msg[4] >> 24) != I2O_REPLY_STATUS_SUCCESS) {
+ if (i2o_lan_handle_status(dev, msg))
+ return;
+
+ /* This should NOT be reached */
+ }
+
+ switch (msg[1] >> 24) {
+ case LAN_RESET:
+ case LAN_SUSPEND:
+ /* default reply without payload */
+ break;
+ case I2O_CMD_UTIL_EVT_REGISTER:
+ case I2O_CMD_UTIL_EVT_ACK:
+ i2o_lan_handle_event(dev, msg);
+ break;
+ default:
+ printk(KERN_ERR "%s: No handler for the reply.\n",
+ dev->name);
+ i2o_report_status(KERN_INFO, dev->name, msg);
+ }
+}
+
+/* Functions used by the above callback functions:
+=================================================*/
+/*
+ * i2o_lan_release_buckets(): Free unused buckets (sk_buffs).
+ */
+static void i2o_lan_release_buckets(struct net_device *dev, u32 *msg)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ u8 trl_elem_size = (u8)(msg[3]>>8 & 0x000000FF);
+ u8 trl_count = (u8)(msg[3] & 0x000000FF);
+ u32 *pskb = &msg[6];
+
+ while (trl_count--) {
+ dprintk(KERN_DEBUG "%s: Releasing unused sk_buff %p (trl_count=%d).\n",
+ dev->name, (struct sk_buff*)(*pskb),trl_count+1);
+ dev_kfree_skb_irq((struct sk_buff *)(*pskb));
+ pskb += 1 + trl_elem_size;
+ atomic_dec(&priv->buckets_out);
}
-
- return 0;
}
+/*
+ * i2o_lan_event_reply(): Handle events.
+ */
+static void i2o_lan_handle_event(struct net_device *dev, u32 *msg)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ struct i2o_reply {
+ u8 version_offset;
+ u8 msg_flags;
+ u16 msg_size;
+ u32 tid:12;
+ u32 initiator:12;
+ u32 function:8;
+ u32 initiator_context;
+ u32 transaction_context;
+ u32 evt_indicator;
+ u32 data[(iop->inbound_size - 20) / 4]; /* max */
+ } *evt = (struct i2o_reply *)msg;
+ int evt_data_len = (evt->msg_size - 5) * 4; /* real */
+
+ printk(KERN_INFO "%s: I2O event - ", dev->name);
+
+ if (evt->function == I2O_CMD_UTIL_EVT_ACK) {
+ printk("Event acknowledgement reply.\n");
+ return;
+ }
+
+ /* Else evt->function == I2O_CMD_UTIL_EVT_REGISTER) */
+
+ switch (evt->evt_indicator) {
+ case I2O_EVT_IND_STATE_CHANGE: {
+ struct state_data {
+ u16 status;
+ u8 state;
+ u8 data;
+ } *evt_data = (struct state_data *)(evt->data[0]);
+
+ printk("State chance 0x%08x.\n", evt->data[0]);
+
+ /* If the DDM is in error state, recovery may be
+ * possible if status = Transmit or Receive Control
+ * Unit Inoperable.
+ */
+ if (evt_data->state==0x05 && evt_data->status==0x0003)
+ i2o_lan_reset(dev);
+ break;
+ }
+
+ case I2O_EVT_IND_GENERAL_WARNING:
+ printk("General warning 0x%04x.\n", evt->data[0]);
+ break;
+
+ case I2O_EVT_IND_CONFIGURATION_FLAG:
+ printk("Configuration requested.\n");
+ break;
+
+ case I2O_EVT_IND_CAPABILITY_CHANGE:
+ printk("Capability change 0x%04x.\n", evt->data[0]);
+ break;
+
+ case I2O_EVT_IND_DEVICE_RESET:
+ /* Spec 2.0 p. 6-121:
+ * The event of _DEVICE_RESET should also be responded
+ */
+ printk("Device reset.\n");
+ if (i2o_event_ack(iop, msg) < 0)
+ printk("%s: Event Acknowledge timeout.\n", dev->name);
+ break;
+
+ case I2O_EVT_IND_EVT_MASK_MODIFIED:
+ printk("Event mask modified, 0x%08x.\n", evt->data[0]);
+ break;
+
+ case I2O_EVT_IND_FIELD_MODIFIED: {
+ u16 *work16 = (u16 *)evt->data;
+ printk("Group 0x%04x, field %d changed.\n", work16[0],
+ work16[1]);
+ break;
+ }
+
+ case I2O_EVT_IND_VENDOR_EVT: {
+ int i;
+ printk("Vendor event:\n");
+ for (i = 0; i < evt_data_len / 4; i++)
+ printk(" 0x%08x\n", evt->data[i]);
+ break;
+ }
+
+ case I2O_EVT_IND_DEVICE_STATE:
+ printk("Device state changed 0x%08x.\n", evt->data[0]);
+ break;
+
+ case I2O_LAN_EVT_LINK_DOWN:
+ printk("Link to the physical device is lost.\n");
+ break;
+
+ case I2O_LAN_EVT_LINK_UP:
+ printk("Link to the physical device is (re)established.\n");
+ break;
+
+ case I2O_LAN_EVT_MEDIA_CHANGE:
+ printk("Media change.\n");
+ break;
+
+ default:
+ printk("Event Indicator = 0x%08x.\n", evt->evt_indicator);
+ }
+
+ /* Note: EventAck necessary only for events that cause the device to
+ * synchronize with the user.
+ */
+}
/*
* i2o_lan_receive_post(): Post buckets to receive packets.
@@ -369,93 +581,108 @@
struct i2o_device *i2o_dev = priv->i2o_dev;
struct i2o_controller *iop = i2o_dev->controller;
struct sk_buff *skb;
- u32 m; u32 *msg;
- u32 bucket_len = (dev->mtu + dev->hard_header_len);
- u32 total = bucketpost - priv->bucket_count;
- u32 bucket_count;
- u32 *sgl_elem;
-
- while (total) {
- m = I2O_POST_READ32(iop);
- if (m == 0xFFFFFFFF)
- return -ETIMEDOUT;
- msg = (u32 *)(iop->mem_offset + m);
-
- bucket_count = (total >= priv->sgl_max) ? priv->sgl_max : total;
- total -= bucket_count;
- priv->bucket_count += bucket_count;
+ u32 m, *msg;
+ u32 bucket_len = (dev->mtu + dev->hard_header_len);
+ u32 total = priv->max_buckets_out - atomic_read(&priv->buckets_out);
+ u32 bucket_count;
+ u32 *sgl_elem;
+ unsigned long flags;
+
+ /* Send (total/bucket_count) separate I2O requests */
+
+ while (total) {
+ m = I2O_POST_READ32(iop);
+ if (m == 0xFFFFFFFF)
+ return -ETIMEDOUT;
+ msg = (u32 *)(iop->mem_offset + m);
+
+ bucket_count = (total >= priv->sgl_max) ? priv->sgl_max : total;
+ total -= bucket_count;
+ atomic_add(bucket_count, &priv->buckets_out);
- dprintk(KERN_INFO "%s: Sending %d buckets (size %d) to LAN HDM.\n",
- dev->name, bucket_count, bucket_len);
+ dprintk(KERN_INFO "%s: Sending %d buckets (size %d) to LAN DDM.\n",
+ dev->name, bucket_count, bucket_len);
+
+ /* Fill in the header */
__raw_writel(I2O_MESSAGE_SIZE(4 + 3 * bucket_count) | SGL_OFFSET_4, msg);
- __raw_writel(LAN_RECEIVE_POST<<24 | HOST_TID<<12 | i2o_dev->lct_data->tid, msg+1);
- __raw_writel(priv->unit << 16 | lan_context, msg+2);
+ __raw_writel(LAN_RECEIVE_POST<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid, msg+1);
+ __raw_writel(priv->unit << 16 | lan_receive_context, msg+2);
__raw_writel(bucket_count, msg+3);
- sgl_elem = &msg[4];
+ sgl_elem = &msg[4];
+
+ /* Fill in the payload - contains bucket_count SGL elements */
- while (bucket_count--) {
- if (priv->i2o_fbl_tail >= 0)
- skb = priv->i2o_fbl[priv->i2o_fbl_tail--];
- else {
- skb = dev_alloc_skb(bucket_len + 2);
- if (skb == NULL)
- return -ENOMEM;
- skb_reserve(skb, 2);
- }
- __raw_writel(0x51000000 | bucket_len, sgl_elem);
- __raw_writel((u32)skb, sgl_elem+1);
- __raw_writel(virt_to_bus(skb->data), sgl_elem+2);
- sgl_elem += 3;
- }
+ while (bucket_count--) {
+ spin_lock_irqsave(&priv->fbl_lock, flags);
+ if (priv->i2o_fbl_tail >= 0)
+ skb = priv->i2o_fbl[priv->i2o_fbl_tail--];
+ else {
+ skb = dev_alloc_skb(bucket_len + 2);
+ if (skb == NULL) {
+ spin_unlock_irqrestore(&priv->fbl_lock, flags);
+ return -ENOMEM;
+ }
+ skb_reserve(skb, 2);
+ }
+ spin_unlock_irqrestore(&priv->fbl_lock, flags);
- /* set LE flag and post buckets */
+ __raw_writel(0x51000000 | bucket_len, sgl_elem);
+ __raw_writel((u32)skb, sgl_elem+1);
+ __raw_writel(virt_to_bus(skb->data), sgl_elem+2);
+ sgl_elem += 3;
+ }
+
+ /* set LE flag and post */
__raw_writel(__raw_readl(sgl_elem-3) | 0x80000000, (sgl_elem-3));
- i2o_post_message(iop,m);
- }
+ i2o_post_message(iop, m);
+ }
- return 0;
+ return 0;
}
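
Each posted bucket above becomes a three-word simple SGL element -- a flags+length word (0x51000000 | bucket_len), the sk_buff pointer as transaction context, and the bus address of the data -- and the first word of the final element gets the LE bit OR'd in before the frame is posted. A compact sketch of that frame-building step, using an ordinary uint32_t array in place of the IOP's message window (assumed layout, userspace only):

#include <stdint.h>
#include <stdio.h>

#define SGL_SIMPLE  0x51000000u	/* simple element, bucket semantics */
#define SGL_LE_FLAG 0x80000000u	/* last-element flag                */

/* Append 'count' bucket elements of 'bucket_len' bytes to the message
 * frame starting at msg[4], mark the final one as the last element,
 * and return the total number of 32-bit words written. */
static unsigned build_receive_post(uint32_t *msg, unsigned count,
				   uint32_t bucket_len,
				   uint32_t (*bus_addr_of)(unsigned))
{
	uint32_t *sgl = &msg[4];
	unsigned i;

	msg[3] = count;			/* bucket count                 */
	for (i = 0; i < count; i++) {
		sgl[0] = SGL_SIMPLE | bucket_len;
		sgl[1] = i;		/* stand-in for the skb pointer */
		sgl[2] = bus_addr_of(i);/* stand-in for virt_to_bus()   */
		sgl += 3;
	}
	*(sgl - 3) |= SGL_LE_FLAG;	/* tag the last element         */

	return 4 + 3 * count;
}

static uint32_t fake_bus_addr(unsigned i) { return 0x10000u + i * 0x800u; }

int main(void)
{
	uint32_t msg[4 + 3 * 8] = { 0 };
	unsigned words = build_receive_post(msg, 8, 1514, fake_bus_addr);

	printf("frame is %u words, last element word = 0x%08x\n",
	       words, msg[4 + 3 * 7]);
	return 0;
}
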
+/* Functions called from the network stack, and functions called by them:
+========================================================================*/
+
/*
* i2o_lan_reset(): Reset the LAN adapter into the operational state and
* restore it to full operation.
*/
static int i2o_lan_reset(struct net_device *dev)
{
- struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
struct i2o_device *i2o_dev = priv->i2o_dev;
- struct i2o_controller *iop = i2o_dev->controller;
+ struct i2o_controller *iop = i2o_dev->controller;
u32 msg[5];
dprintk(KERN_INFO "%s: LAN RESET MESSAGE.\n", dev->name);
msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
- msg[1] = LAN_RESET<<24 | HOST_TID<<12 | i2o_dev->lct_data->tid;
+ msg[1] = LAN_RESET<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid;
msg[2] = priv->unit << 16 | lan_context; // InitiatorContext
msg[3] = 0; // TransactionContext
- msg[4] = 1 << 16; // return posted buckets
+ msg[4] = 0; // keep posted buckets
if (i2o_post_this(iop, msg, sizeof(msg)) < 0)
- return -ETIMEDOUT;
+ return -ETIMEDOUT;
return 0;
}
/*
* i2o_lan_suspend(): Put LAN adapter into a safe, non-active state.
- * Reply to any LAN class message with status error_no_data_transfer
+ * IOP replies to any LAN class message with status error_no_data_transfer
* / suspended.
*/
static int i2o_lan_suspend(struct net_device *dev)
{
- struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
struct i2o_device *i2o_dev = priv->i2o_dev;
- struct i2o_controller *iop = i2o_dev->controller;
+ struct i2o_controller *iop = i2o_dev->controller;
u32 msg[5];
dprintk(KERN_INFO "%s: LAN SUSPEND MESSAGE.\n", dev->name);
msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
- msg[1] = LAN_SUSPEND<<24 | HOST_TID<<12 | i2o_dev->lct_data->tid;
+ msg[1] = LAN_SUSPEND<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid;
msg[2] = priv->unit << 16 | lan_context; // InitiatorContext
msg[3] = 0; // TransactionContext
msg[4] = 1 << 16; // return posted buckets
@@ -471,75 +698,91 @@
*/
static void i2o_set_batch_mode(struct net_device *dev)
{
- struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
- struct i2o_device *i2o_dev = priv->i2o_dev;
- struct i2o_controller *iop = i2o_dev->controller;
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
u32 val;
- /* set LAN_BATCH_CONTROL attributes */
+ /* Set defaults LAN_BATCH_CONTROL attributes */
+ /* May be changed via /proc or Configuration Utility */
- // enable batch mode, toggle automatically
- val = 0x00000000;
- if (i2o_set_scalar(iop, i2o_dev->lct_data->tid, 0x0003, 0, &val, sizeof(val)) <0)
+ val = 0x00000000; // enable batch mode, toggle automatically
+ if (i2o_set_scalar(iop, i2o_dev->lct_data.tid, 0x0003, 0, &val, sizeof(val)) <0)
printk(KERN_WARNING "%s: Unable to enter I2O LAN batch mode.\n",
- dev->name);
+ dev->name);
else
- dprintk(KERN_INFO "%s: I2O LAN batch mode enabled.\n",dev->name);
+ dprintk(KERN_INFO "%s: I2O LAN batch mode enabled.\n", dev->name);
+
+ /* Set LAN_OPERATION attributes */
+
+#ifdef DRIVERDEBUG
+/* Added for testing: this will be removed */
+ val = 0x00000003; // 1 = UserFlags
+ if (i2o_set_scalar(iop, i2o_dev->lct_data.tid, 0x0004, 1, &val, sizeof(val)) < 0)
+ printk(KERN_WARNING "%s: Can't enable ErrorReporting & BadPacketHandling.\n",
+ dev->name);
+ else
+ dprintk(KERN_INFO "%s: ErrorReporting enabled, "
+ "BadPacketHandling enabled.\n", dev->name);
+#endif /* DRIVERDEBUG */
/*
* When PacketOrphanlimit is same as the maximum packet length,
* the packets will never be split into two separate buckets
*/
-
- /* set LAN_OPERATION attributes */
-
- val = dev->mtu + dev->hard_header_len; // PacketOrphanLimit
- if (i2o_set_scalar(iop, i2o_dev->lct_data->tid, 0x0004, 2, &val, sizeof(val)) < 0)
+ val = dev->mtu + dev->hard_header_len; // 2 = PacketOrphanLimit
+ if (i2o_set_scalar(iop, i2o_dev->lct_data.tid, 0x0004, 2, &val, sizeof(val)) < 0)
printk(KERN_WARNING "%s: Unable to set PacketOrphanLimit.\n",
- dev->name);
+ dev->name);
else
dprintk(KERN_INFO "%s: PacketOrphanLimit set to %d.\n",
- dev->name,val);
-
- return;
+ dev->name, val);
+
+ return;
}
+/* Functions called from the network stack:
+==========================================*/
+
/*
* i2o_lan_open(): Open the device to send/receive packets via
- * the network device.
+ * the network device.
*/
static int i2o_lan_open(struct net_device *dev)
{
- struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
struct i2o_device *i2o_dev = priv->i2o_dev;
-#if 0
struct i2o_controller *iop = i2o_dev->controller;
- u32 evt_mask = 0xFFC00007; // All generic events, all lan events
-#endif
- if (i2o_claim_device(i2o_dev, &i2o_lan_handler, I2O_CLAIM_PRIMARY)) {
+
+ MOD_INC_USE_COUNT;
+
+ if (i2o_claim_device(i2o_dev, &i2o_lan_handler)) {
printk(KERN_WARNING "%s: Unable to claim the I2O LAN device.\n", dev->name);
+ MOD_DEC_USE_COUNT;
return -EAGAIN;
}
- dprintk(KERN_INFO "%s: I2O LAN device claimed (tid=%d).\n",
- dev->name, i2o_dev->lct_data->tid);
-#if 0
- if (i2o_event_register(iop, i2o_dev->lct_data->tid,
- priv->unit << 16 | lan_context, evt_mask) < 0)
+ dprintk(KERN_INFO "%s: I2O LAN device (tid=%d) claimed by LAN OSM.\n",
+ dev->name, i2o_dev->lct_data.tid);
+
+ if (i2o_event_register(iop, i2o_dev->lct_data.tid,
+ priv->unit << 16 | lan_context, 0, priv->i2o_event_mask) < 0)
printk(KERN_WARNING "%s: Unable to set the event mask.\n", dev->name);
-#endif
+
i2o_lan_reset(dev);
-
- priv->i2o_fbl = kmalloc(bucketpost * sizeof(struct sk_buff *),GFP_KERNEL);
- if (priv->i2o_fbl == NULL)
+
+ priv->i2o_fbl = kmalloc(priv->max_buckets_out * sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (priv->i2o_fbl == NULL) {
+ MOD_DEC_USE_COUNT;
return -ENOMEM;
+ }
priv->i2o_fbl_tail = -1;
-
- netif_start_queue(dev);
+ priv->send_active = 0;
i2o_set_batch_mode(dev);
i2o_lan_receive_post(dev);
- MOD_INC_USE_COUNT;
+ netif_start_queue(dev);
return 0;
}
@@ -549,22 +792,24 @@
*/
static int i2o_lan_close(struct net_device *dev)
{
- struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
- struct i2o_device *i2o_dev = priv->i2o_dev;
-#if 0
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
struct i2o_controller *iop = i2o_dev->controller;
- if (i2o_event_register(iop, i2o_dev->lct_data->tid,
- priv->unit << 16 | lan_context, 0) < 0)
- printk(KERN_WARNING "%s: Unable to clear the event mask.\n",
- dev->name);
-#endif
-
netif_stop_queue(dev);
+
i2o_lan_suspend(dev);
- if (i2o_release_device(i2o_dev, &i2o_lan_handler, I2O_CLAIM_PRIMARY))
+ if (i2o_event_register(iop, i2o_dev->lct_data.tid,
+ priv->unit << 16 | lan_context, 0, 0) < 0)
+ printk(KERN_WARNING "%s: Unable to clear the event mask.\n",
+ dev->name);
+
+ if (i2o_release_device(i2o_dev, &i2o_lan_handler)) {
printk(KERN_WARNING "%s: Unable to unclaim I2O LAN device "
- "(tid=%d).\n", dev->name, i2o_dev->lct_data->tid);
+ "(tid=%d).\n", dev->name, i2o_dev->lct_data.tid);
+ return -EBUSY;
+ }
while (priv->i2o_fbl_tail >= 0)
dev_kfree_skb(priv->i2o_fbl[priv->i2o_fbl_tail--]);
@@ -575,28 +820,115 @@
return 0;
}
-#if 0
/*
- * i2o_lan_sdu_send(): Send a packet, MAC header added by the HDM.
- * Must be supported by Fibre Channel, optional for Ethernet/802.3,
- * Token Ring, FDDI
+ * i2o_lan_tx_timeout(): Tx timeout handler.
*/
-static int i2o_lan_sdu_send(struct sk_buff *skb, struct net_device *dev)
-{
- return -EINVAL;
+static void i2o_lan_tx_timeout(struct net_device *dev)
+{
+ if (!netif_queue_stopped(dev))
+ netif_start_queue(dev);
}
-#endif
+#define batching(x, cond) ( (x)->tx_batch_mode==1 || ((x)->tx_batch_mode==2 && (cond)) )
+
+/*
+ * Batch send packets. Both i2o_lan_sdu_send and i2o_lan_packet_send
+ * use this. I'm still not pleased. If you come up with
+ * something better, please tell me. -taneli
+ */
static void i2o_lan_batch_send(struct net_device *dev)
-{
+{
struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
struct i2o_controller *iop = priv->i2o_dev->controller;
+ spin_lock_irq(&priv->tx_lock);
if (priv->tx_count != 0) {
+ dev->trans_start = jiffies;
i2o_post_message(iop, priv->m);
- dprintk("%s: %d packets sent.\n", dev->name, priv->tx_count);
+ dprintk(KERN_DEBUG "%s: %d packets sent.\n", dev->name, priv->tx_count);
priv->tx_count = 0;
}
+ spin_unlock_irq(&priv->tx_lock);
+
+ priv->send_active = 0;
+}
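
The batching() macro above encodes three transmit modes: 0 sends every packet immediately, 1 always defers to the batch-send task, and 2 batches only while the given condition holds (the driver uses !tickssofar, i.e. a transmit already started during the current jiffy). A small sketch of the same decision in plain C, with the mode values assumed from the macro:

#include <stdio.h>

/* 0 = immediate send, 1 = always batch, 2 = batch while 'busy' holds. */
static int should_batch(int tx_batch_mode, int busy)
{
	return tx_batch_mode == 1 || (tx_batch_mode == 2 && busy);
}

int main(void)
{
	int mode, busy;

	for (mode = 0; mode <= 2; mode++)
		for (busy = 0; busy <= 1; busy++)
			printf("mode=%d busy=%d -> %s\n", mode, busy,
			       should_batch(mode, busy) ? "batch" : "send now");
	return 0;
}
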
+
+/*
+ * i2o_lan_sdu_send(): Send a packet, MAC header added by the DDM.
+ * Must be supported by Fibre Channel, optional for Ethernet/802.3,
+ * Token Ring, FDDI
+ */
+
+/*
+ * This is a coarse first approximation. Needs testing. Any takers? -taneli
+ */
+static int i2o_lan_sdu_send(struct sk_buff *skb, struct net_device *dev)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ int tickssofar = jiffies - dev->trans_start;
+ u32 m, *msg;
+ u32 *sgl_elem;
+
+ spin_lock_irq(&priv->tx_lock);
+
+ priv->tx_count++;
+ atomic_inc(&priv->tx_out);
+
+ if (priv->tx_count == 1) {
+ m = I2O_POST_READ32(iop);
+ if (m == 0xFFFFFFFF) {
+ spin_unlock_irq(&priv->tx_lock);
+ return 1;
+ }
+ msg = (u32 *)(iop->mem_offset + m);
+ priv->m = m;
+
+ __raw_writel(NINE_WORD_MSG_SIZE | 1<<12 | SGL_OFFSET_4, msg);
+ __raw_writel(LAN_PACKET_SEND<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid, msg+1);
+ __raw_writel(priv->unit << 16 | lan_send_context, msg+2); // InitiatorContext
+ __raw_writel(1 << 3, msg+3); // TransmitControlWord
+
+ __raw_writel(0xD7000000 | skb->len, msg+4); // MAC hdr included
+ __raw_writel((u32)skb, msg+5); // TransactionContext
+ __raw_writel(virt_to_bus(skb->data), msg+6);
+ __raw_writel((u32)skb->mac.raw, msg+7);
+ __raw_writel((u32)skb->mac.raw+4, msg+8);
+ if (batching(priv, !tickssofar) && !priv->send_active) {
+ priv->send_active = 1;
+ queue_task(&priv->i2o_batch_send_task, &tq_scheduler);
+ }
+ } else { /* Add new SGL element to the previous message frame */
+
+ msg = (u32 *)(iop->mem_offset + priv->m);
+ sgl_elem = &msg[priv->tx_count * 5 + 1];
+
+ __raw_writel(I2O_MESSAGE_SIZE((__raw_readl(msg)>>16) + 5) | 1<<12 | SGL_OFFSET_4, msg);
+ __raw_writel(__raw_readl(sgl_elem-5) & 0x7FFFFFFF, sgl_elem-5); /* clear LE flag */
+ __raw_writel(0xD5000000 | skb->len, sgl_elem);
+ __raw_writel((u32)skb, sgl_elem+1);
+ __raw_writel(virt_to_bus(skb->data), sgl_elem+2);
+ __raw_writel((u32)(skb->mac.raw), sgl_elem+3);
+ __raw_writel((u32)(skb->mac.raw)+1, sgl_elem+4);
+ }
+
+ /* If tx not in batch mode or frame is full, send immediately */
+
+ if (!batching(priv, !tickssofar) || priv->tx_count == priv->sgl_max) {
+ dev->trans_start = jiffies;
+ i2o_post_message(iop, priv->m);
+ dprintk(KERN_DEBUG "%s: %d packets sent.\n", dev->name, priv->tx_count);
+ priv->tx_count = 0;
+ }
+
+ /* If DDMs TxMaxPktOut reached, stop queueing layer to send more */
+
+ if (atomic_read(&priv->tx_out) >= priv->tx_max_out)
+ netif_stop_queue(dev);
+
+ spin_unlock_irq(&priv->tx_lock);
+ return 0;
}
/*
@@ -610,37 +942,37 @@
struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
struct i2o_device *i2o_dev = priv->i2o_dev;
struct i2o_controller *iop = i2o_dev->controller;
+ int tickssofar = jiffies - dev->trans_start;
u32 m, *msg;
u32 *sgl_elem;
+ spin_lock_irq(&priv->tx_lock);
+
priv->tx_count++;
- priv->tx_out++;
+ atomic_inc(&priv->tx_out);
if (priv->tx_count == 1) {
- dprintk("%s: New message frame\n", dev->name);
-
m = I2O_POST_READ32(iop);
if (m == 0xFFFFFFFF) {
- dev_kfree_skb(skb);
- return -ETIMEDOUT;
+ spin_unlock_irq(&priv->tx_lock);
+ return 1;
}
msg = (u32 *)(iop->mem_offset + m);
priv->m = m;
__raw_writel(SEVEN_WORD_MSG_SIZE | 1<<12 | SGL_OFFSET_4, msg);
- __raw_writel(LAN_PACKET_SEND<<24 | HOST_TID<<12 | i2o_dev->lct_data->tid, msg+1);
- __raw_writel(priv->unit << 16 | lan_context, msg+2); // InitiatorContext
- __raw_writel(1 << 4, msg+3); // TransmitControlWord
+ __raw_writel(LAN_PACKET_SEND<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid, msg+1);
+ __raw_writel(priv->unit << 16 | lan_send_context, msg+2); // InitiatorContext
+ __raw_writel(1 << 3, msg+3); // TransmitControlWord
+
__raw_writel(0xD5000000 | skb->len, msg+4); // MAC hdr included
__raw_writel((u32)skb, msg+5); // TransactionContext
__raw_writel(virt_to_bus(skb->data), msg+6);
-
- queue_task(&priv->i2o_batch_send_task, &tq_scheduler);
-
+ if (batching(priv, !tickssofar) && !priv->send_active) {
+ priv->send_active = 1;
+ queue_task(&priv->i2o_batch_send_task, &tq_scheduler);
+ }
} else { /* Add new SGL element to the previous message frame */
-
- dprintk("%s: Adding packet %d to msg frame\n",
- dev->name, priv->tx_count);
msg = (u32 *)(iop->mem_offset + priv->m);
sgl_elem = &msg[priv->tx_count * 3 + 1];
@@ -650,19 +982,23 @@
__raw_writel(0xD5000000 | skb->len, sgl_elem);
__raw_writel((u32)skb, sgl_elem+1);
__raw_writel(virt_to_bus(skb->data), sgl_elem+2);
+ }
- if (priv->tx_count == priv->sgl_max) { /* frame full, send now */
- i2o_post_message(iop, priv->m);
- dprintk("%s: %d packets sent.\n", dev->name, priv->tx_count);
- priv->tx_count = 0;
- }
+ /* If tx not in batch mode or frame is full, send immediately */
+
+ if (!batching(priv, !tickssofar) || priv->tx_count == priv->sgl_max) {
+ dev->trans_start = jiffies;
+ i2o_post_message(iop, priv->m);
+ dprintk(KERN_DEBUG"%s: %d packets sent.\n", dev->name, priv->tx_count);
+ priv->tx_count = 0;
}
-
- /* If HDMs TxMaxPktOut reached, stay busy (don't clean tbusy) */
- if (priv->tx_out >= priv->tx_max_out)
+ /* If DDMs TxMaxPktOut reached, stop queueing layer to send more */
+
+ if (atomic_read(&priv->tx_out) >= priv->tx_max_out)
netif_stop_queue(dev);
-
+
+ spin_unlock_irq(&priv->tx_lock);
return 0;
}
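
The queue control at the end of the send path is a simple credit scheme: tx_out counts transmits handed to the DDM, and once it reaches the DDM's TxMaxPktOut the network queue is stopped until the send-reply callback frees the skbs and wakes it again. A minimal sketch of that counter discipline (hypothetical names, no locking):

#include <stdio.h>

struct tx_credit {
	int out;	/* transmits currently owned by the DDM      */
	int max_out;	/* DDM's advertised TxMaxPktOut              */
	int stopped;	/* mirrors netif_stop_queue()/wake_queue()   */
};

/* Called for each packet handed to the hardware. */
static void tx_submit(struct tx_credit *c)
{
	if (++c->out >= c->max_out)
		c->stopped = 1;		/* netif_stop_queue() */
}

/* Called from the send-reply callback for each completed packet. */
static void tx_complete(struct tx_credit *c)
{
	c->out--;
	if (c->stopped)
		c->stopped = 0;		/* netif_wake_queue() */
}

int main(void)
{
	struct tx_credit c = { 0, 4, 0 };
	int i;

	for (i = 1; i <= 4; i++) {
		tx_submit(&c);
		printf("submit %d: out=%d stopped=%d\n", i, c.out, c.stopped);
	}
	tx_complete(&c);
	printf("after one completion: out=%d stopped=%d\n", c.out, c.stopped);
	return 0;
}
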
@@ -671,65 +1007,64 @@
*/
static struct net_device_stats *i2o_lan_get_stats(struct net_device *dev)
{
- struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
struct i2o_device *i2o_dev = priv->i2o_dev;
struct i2o_controller *iop = i2o_dev->controller;
u64 val64[16];
u64 supported_group[4] = { 0, 0, 0, 0 };
- if (i2o_query_scalar(iop, i2o_dev->lct_data->tid, 0x0100, -1, val64,
- sizeof(val64)) < 0)
- printk("%s: Unable to query LAN_HISTORICAL_STATS.\n",dev->name);
+ if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0100, -1, val64,
+ sizeof(val64)) < 0)
+ printk(KERN_INFO "%s: Unable to query LAN_HISTORICAL_STATS.\n", dev->name);
else {
- dprintk("%s: LAN_HISTORICAL_STATS queried.\n",dev->name);
- priv->stats.tx_packets = val64[0];
- priv->stats.tx_bytes = val64[1];
- priv->stats.rx_packets = val64[2];
- priv->stats.rx_bytes = val64[3];
- priv->stats.tx_errors = val64[4];
- priv->stats.rx_errors = val64[5];
+ dprintk(KERN_DEBUG "%s: LAN_HISTORICAL_STATS queried.\n", dev->name);
+ priv->stats.tx_packets = val64[0];
+ priv->stats.tx_bytes = val64[1];
+ priv->stats.rx_packets = val64[2];
+ priv->stats.rx_bytes = val64[3];
+ priv->stats.tx_errors = val64[4];
+ priv->stats.rx_errors = val64[5];
priv->stats.rx_dropped = val64[6];
}
- if (i2o_query_scalar(iop, i2o_dev->lct_data->tid, 0x0180, -1,
- &supported_group, sizeof(supported_group)) < 0)
- printk("%s: Unable to query LAN_SUPPORTED_OPTIONAL_HISTORICAL_STATS.\n",dev->name);
+ if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0180, -1,
+ &supported_group, sizeof(supported_group)) < 0)
+ printk(KERN_INFO "%s: Unable to query LAN_SUPPORTED_OPTIONAL_HISTORICAL_STATS.\n", dev->name);
if (supported_group[2]) {
- if (i2o_query_scalar(iop, i2o_dev->lct_data->tid, 0x0183, -1,
- val64, sizeof(val64)) < 0)
- printk("%s: Unable to query LAN_OPTIONAL_RX_HISTORICAL_STATS.\n",dev->name);
+ if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0183, -1,
+ val64, sizeof(val64)) < 0)
+ printk(KERN_INFO "%s: Unable to query LAN_OPTIONAL_RX_HISTORICAL_STATS.\n", dev->name);
else {
- dprintk("%s: LAN_OPTIONAL_RX_HISTORICAL_STATS queried.\n",dev->name);
- priv->stats.multicast = val64[4];
+ dprintk(KERN_DEBUG "%s: LAN_OPTIONAL_RX_HISTORICAL_STATS queried.\n", dev->name);
+ priv->stats.multicast = val64[4];
priv->stats.rx_length_errors = val64[10];
priv->stats.rx_crc_errors = val64[0];
}
}
- if (i2o_dev->lct_data->sub_class == I2O_LAN_ETHERNET) {
- u64 supported_stats = 0;
-
- if (i2o_query_scalar(iop, i2o_dev->lct_data->tid, 0x0200, -1,
- val64, sizeof(val64)) < 0)
- printk("%s: Unable to query LAN_802_3_HISTORICAL_STATS.\n",dev->name);
+ if (i2o_dev->lct_data.sub_class == I2O_LAN_ETHERNET) {
+ u64 supported_stats = 0;
+ if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0200, -1,
+ val64, sizeof(val64)) < 0)
+ printk(KERN_INFO "%s: Unable to query LAN_802_3_HISTORICAL_STATS.\n", dev->name);
else {
- dprintk("%s: LAN_802_3_HISTORICAL_STATS queried.\n",dev->name);
+ dprintk(KERN_DEBUG "%s: LAN_802_3_HISTORICAL_STATS queried.\n", dev->name);
priv->stats.transmit_collision = val64[1] + val64[2];
- priv->stats.rx_frame_errors = val64[0];
+ priv->stats.rx_frame_errors = val64[0];
priv->stats.tx_carrier_errors = val64[6];
}
- if (i2o_query_scalar(iop, i2o_dev->lct_data->tid, 0x0280, -1,
- &supported_stats, sizeof(supported_stats)) < 0)
- printk("%s: Unable to query LAN_SUPPORTED_802_3_HISTORICAL_STATS.\n", dev->name);
-
- if (supported_stats != 0) {
- if (i2o_query_scalar(iop, i2o_dev->lct_data->tid, 0x0281, -1,
- val64, sizeof(val64)) < 0)
- printk("%s: Unable to query LAN_OPTIONAL_802_3_HISTORICAL_STATS.\n",dev->name);
+ if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0280, -1,
+ &supported_stats, sizeof(supported_stats)) < 0)
+ printk(KERN_INFO "%s: Unable to query LAN_SUPPORTED_802_3_HISTORICAL_STATS.\n", dev->name);
+
+ if (supported_stats != 0) {
+ if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0281, -1,
+ val64, sizeof(val64)) < 0)
+ printk(KERN_INFO "%s: Unable to query LAN_OPTIONAL_802_3_HISTORICAL_STATS.\n", dev->name);
else {
- dprintk("%s: LAN_OPTIONAL_802_3_HISTORICAL_STATS queried.\n",dev->name);
+ dprintk(KERN_DEBUG "%s: LAN_OPTIONAL_802_3_HISTORICAL_STATS queried.\n", dev->name);
if (supported_stats & 0x1)
priv->stats.rx_over_errors = val64[0];
if (supported_stats & 0x4)
@@ -739,14 +1074,14 @@
}
#ifdef CONFIG_TR
- if (i2o_dev->lct_data->sub_class == I2O_LAN_TR) {
- if (i2o_query_scalar(iop, i2o_dev->lct_data->tid, 0x0300, -1,
- val64, sizeof(val64)) < 0)
- printk("%s: Unable to query LAN_802_5_HISTORICAL_STATS.\n",dev->name);
+ if (i2o_dev->lct_data.sub_class == I2O_LAN_TR) {
+ if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0300, -1,
+ val64, sizeof(val64)) < 0)
+ printk(KERN_INFO "%s: Unable to query LAN_802_5_HISTORICAL_STATS.\n", dev->name);
else {
struct tr_statistics *stats =
- (struct tr_statistics *)&priv->stats;
- dprintk("%s: LAN_802_5_HISTORICAL_STATS queried.\n",dev->name);
+ (struct tr_statistics *)&priv->stats;
+ dprintk(KERN_DEBUG "%s: LAN_802_5_HISTORICAL_STATS queried.\n", dev->name);
stats->line_errors = val64[0];
stats->internal_errors = val64[7];
@@ -764,25 +1099,29 @@
#endif
#ifdef CONFIG_FDDI
- if (i2o_dev->lct_data->sub_class == I2O_LAN_FDDI) {
- if (i2o_query_scalar(iop, i2o_dev->lct_data->tid, 0x0400, -1,
- val64, sizeof(val64)) < 0)
- printk("%s: Unable to query LAN_FDDI_HISTORICAL_STATS.\n",dev->name);
+ if (i2o_dev->lct_data.sub_class == I2O_LAN_FDDI) {
+ if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0400, -1,
+ val64, sizeof(val64)) < 0)
+ printk(KERN_INFO "%s: Unable to query LAN_FDDI_HISTORICAL_STATS.\n", dev->name);
else {
- dprintk("%s: LAN_FDDI_HISTORICAL_STATS queried.\n",dev->name);
+ dprintk(KERN_DEBUG "%s: LAN_FDDI_HISTORICAL_STATS queried.\n", dev->name);
priv->stats.smt_cf_state = val64[0];
memcpy(priv->stats.mac_upstream_nbr, &val64[1], FDDI_K_ALEN);
- memcpy(priv->stats.mac_downstream_nbr, &val64[2], FDDI_K_ALEN);
+ memcpy(priv->stats.mac_downstream_nbr, &val64[2], FDDI_K_ALEN);
priv->stats.mac_error_cts = val64[3];
priv->stats.mac_lost_cts = val64[4];
priv->stats.mac_rmt_state = val64[5];
memcpy(priv->stats.port_lct_fail_cts, &val64[6], 8);
- memcpy(priv->stats.port_lem_reject_cts, &val64[7], 8);
+ memcpy(priv->stats.port_lem_reject_cts, &val64[7], 8);
memcpy(priv->stats.port_lem_cts, &val64[8], 8);
memcpy(priv->stats.port_pcm_state, &val64[9], 8);
}
/* FDDI optional stats not yet defined */
- }
+ }
+#endif
+
+#ifdef CONFIG_NET_FC
+ /* Fibre Channel Statistics not yet defined in 1.53 nor 2.0 */
#endif
return (struct net_device_stats *)&priv->stats;
@@ -795,70 +1134,79 @@
static void i2o_lan_set_mc_list(struct net_device *dev)
{
- struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
- struct i2o_device *i2o_dev = priv->i2o_dev;
- struct i2o_controller *iop = i2o_dev->controller;
- u32 filter_mask;
- u32 max_size_mc_table;
- u32 mc_addr_group[64];
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ u32 filter_mask;
+ u32 max_size_mc_table;
+ u32 mc_addr_group[64];
+
+// This isn't safe yet. Needs to be async.
+return;
+
+// read_lock_bh(&dev_mc_lock);
+// spin_lock(&dev->xmit_lock);
+// dev->xmit_lock_owner = smp_processor_id();
- if (i2o_query_scalar(iop, i2o_dev->lct_data->tid, 0x0001, -1,
- &mc_addr_group, sizeof(mc_addr_group)) < 0 ) {
+ if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0001, -1,
+ &mc_addr_group, sizeof(mc_addr_group)) < 0 ) {
printk(KERN_WARNING "%s: Unable to query LAN_MAC_ADDRESS group.\n", dev->name);
- return;
- }
-
- max_size_mc_table = mc_addr_group[8];
+ return;
+ }
- if (dev->flags & IFF_PROMISC) {
- filter_mask = 0x00000002;
- dprintk(KERN_INFO "%s: Enabling promiscuous mode...\n", dev->name);
- }
-
- else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > max_size_mc_table) {
- filter_mask = 0x00000004;
- dprintk(KERN_INFO "%s: Enabling all multicast mode...\n", dev->name);
- }
+ max_size_mc_table = mc_addr_group[8];
- else if (dev->mc_count) {
- struct dev_mc_list *mc;
+ if (dev->flags & IFF_PROMISC) {
+ filter_mask = 0x00000002;
+ printk(KERN_INFO "%s: Enabling promiscuous mode...\n", dev->name);
+ } else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > max_size_mc_table) {
+ filter_mask = 0x00000004;
+ printk(KERN_INFO "%s: Enabling all multicast mode...\n", dev->name);
+ } else if (dev->mc_count) {
+ struct dev_mc_list *mc;
u8 mc_table[2 + 8 * dev->mc_count]; // RowCount, Addresses
u64 *work64 = (u64 *)(mc_table + 2);
- filter_mask = 0x00000000;
- dprintk(KERN_INFO "%s: Enabling multicast mode...\n", dev->name);
+ filter_mask = 0x00000000;
+ printk(KERN_INFO "%s: Enabling multicast mode...\n", dev->name);
- /* Fill multicast addr table */
+ /* Fill multicast addr table */
memset(mc_table, 0, sizeof(mc_table));
- memcpy(mc_table, &dev->mc_count, 2);
- for (mc = dev->mc_list; mc ; mc = mc->next, work64++ )
- memcpy(work64, mc->dmi_addr, mc->dmi_addrlen);
-
+ memcpy(mc_table, &dev->mc_count, 2);
+ for (mc = dev->mc_list; mc ; mc = mc->next, work64++ )
+ memcpy(work64, mc->dmi_addr, mc->dmi_addrlen);
+
/* Clear old mc table, copy new table to <iop,tid> */
- if (i2o_clear_table(iop, i2o_dev->lct_data->tid, 0x0002) < 0)
- printk("%s: Unable to clear LAN_MULTICAST_MAC_ADDRESS table.\n",dev->name);
+ if (i2o_clear_table(iop, i2o_dev->lct_data.tid, 0x0002) < 0)
+ printk(KERN_INFO "%s: Unable to clear LAN_MULTICAST_MAC_ADDRESS table.\n", dev->name);
- if ((i2o_row_add_table(iop, i2o_dev->lct_data->tid, 0x0002, -1,
- mc_table, sizeof(mc_table))) < 0)
- printk("%s: Unable to set LAN_MULTICAST_MAC_ADDRESS table.\n",dev->name);
- }
-
- else {
- filter_mask = 0x00000300; // Broadcast, Multicast disabled
- printk(KERN_INFO "%s: Enabling unicast mode...\n",dev->name);
- }
+ if ((i2o_row_add_table(iop, i2o_dev->lct_data.tid, 0x0002, -1,
+ mc_table, sizeof(mc_table))) < 0)
+ printk(KERN_INFO "%s: Unable to set LAN_MULTICAST_MAC_ADDRESS table.\n", dev->name);
+ } else {
+ filter_mask = 0x00000300; // Broadcast, Multicast disabled
+ printk(KERN_INFO "%s: Enabling unicast mode...\n", dev->name);
+ }
/* Finally copy new FilterMask to <iop,tid> */
- if (i2o_set_scalar(iop, i2o_dev->lct_data->tid, 0x0001, 3,
- &filter_mask, sizeof(filter_mask)) <0)
- printk(KERN_WARNING "%s: Unable to set MAC FilterMask.\n",dev->name);
+ if (i2o_set_scalar(iop, i2o_dev->lct_data.tid, 0x0001, 3,
+ &filter_mask, sizeof(filter_mask)) <0)
+ printk(KERN_WARNING "%s: Unable to set MAC FilterMask.\n", dev->name);
+
+ dev->xmit_lock_owner = -1;
+ spin_unlock(&dev->xmit_lock);
+// read_unlock_bh(&dev_mc_lock);
- return;
+ return;
}
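
The multicast path above reduces the interface flags to a single LAN_MAC_ADDRESS FilterMask value: 0x00000002 for promiscuous mode, 0x00000004 for all-multicast (or an overflowing table), 0x00000000 when an explicit multicast table is loaded, and 0x00000300 (broadcast and multicast disabled) otherwise. A tiny sketch of just that mapping, with flag bits assumed for illustration only:

#include <stdio.h>

#define IFF_PROMISC_F   0x1	/* assumed flag bits, sketch only */
#define IFF_ALLMULTI_F  0x2

static unsigned filter_mask(unsigned flags, int mc_count, int mc_table_size)
{
	if (flags & IFF_PROMISC_F)
		return 0x00000002;	/* promiscuous              */
	if ((flags & IFF_ALLMULTI_F) || mc_count > mc_table_size)
		return 0x00000004;	/* all multicast            */
	if (mc_count)
		return 0x00000000;	/* use the multicast table  */
	return 0x00000300;		/* unicast only             */
}

int main(void)
{
	printf("promisc  -> 0x%08x\n", filter_mask(IFF_PROMISC_F, 0, 64));
	printf("allmulti -> 0x%08x\n", filter_mask(IFF_ALLMULTI_F, 0, 64));
	printf("mc list  -> 0x%08x\n", filter_mask(0, 3, 64));
	printf("unicast  -> 0x%08x\n", filter_mask(0, 0, 64));
	return 0;
}
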
+static struct tq_struct i2o_lan_set_mc_list_task = {
+ 0, 0, (void (*)(void *))i2o_lan_set_mc_list, (void *) 0
+};
+
/*
* i2o_lan_set_multicast_list():
* Queue routine i2o_lan_set_mc_list() to be called later.
@@ -867,17 +1215,12 @@
static void i2o_lan_set_multicast_list(struct net_device *dev)
{
- struct tq_struct *task;
-
- task = (struct tq_struct *)kmalloc(sizeof(struct tq_struct), GFP_KERNEL);
- if (task == NULL)
- return;
-
- task->next = NULL;
- task->sync = 0;
- task->routine = (void *)i2o_lan_set_mc_list;
- task->data = (void *)dev;
- queue_task(task, &tq_scheduler);
+ if (!in_interrupt()) {
+ i2o_lan_set_mc_list_task.data = (void *)dev;
+ queue_task(&i2o_lan_set_mc_list_task, &tq_scheduler);
+ } else {
+ i2o_lan_set_mc_list(dev);
+ }
}
/*
@@ -885,13 +1228,24 @@
*/
static int i2o_lan_change_mtu(struct net_device *dev, int new_mtu)
{
- if ((new_mtu < 68) || (new_mtu > 9000))
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ u32 max_pkt_size;
+
+ if (i2o_query_scalar(i2o_dev->controller, i2o_dev->lct_data.tid,
+ 0x0000, 6, &max_pkt_size, 4) < 0)
+ return -EFAULT;
+
+ if (new_mtu < 68 || max_pkt_size < new_mtu)
return -EINVAL;
-
+
dev->mtu = new_mtu;
return 0;
}
+/* Functions to initialize I2O LAN OSM:
+======================================*/
+
/*
* i2o_lan_register_device(): Register LAN class device to kernel.
*/
@@ -904,9 +1258,9 @@
unsigned short (*type_trans)(struct sk_buff *, struct net_device *);
void (*unregister_dev)(struct net_device *dev);
- switch (i2o_dev->lct_data->sub_class) {
+ switch (i2o_dev->lct_data.sub_class) {
case I2O_LAN_ETHERNET:
- dev = init_etherdev(NULL, sizeof(struct i2o_lan_local));
+ dev = init_etherdev(NULL, sizeof(struct i2o_lan_local));
if (dev == NULL)
return NULL;
type_trans = eth_type_trans;
@@ -916,6 +1270,7 @@
#ifdef CONFIG_ANYLAN
case I2O_LAN_100VG:
printk(KERN_ERR "i2o_lan: 100base VG not yet supported.\n");
+ return NULL;
break;
#endif
@@ -925,7 +1280,7 @@
if (dev==NULL)
return NULL;
type_trans = tr_type_trans;
- unregister_dev = unregister_trdev;
+ unregister_dev = unregister_trdev;
break;
#endif
@@ -933,92 +1288,122 @@
case I2O_LAN_FDDI:
{
int size = sizeof(struct net_device) + sizeof(struct i2o_lan_local)
- + sizeof("fddi%d ");
+ + sizeof("fddi%d ");
- dev = (struct net_device *) kmalloc(size, GFP_KERNEL);
- memset((char *)dev, 0, size);
- dev->priv = (void *)(dev + 1);
- dev->name = (char *)(dev + 1) + sizeof(struct i2o_lan_local);
+ dev = (struct net_device *) kmalloc(size, GFP_KERNEL);
+ if (dev == NULL)
+ return NULL;
+ memset((char *)dev, 0, size);
+ dev->priv = (void *)(dev + 1);
+ dev->name = (char *)(dev + 1) + sizeof(struct i2o_lan_local);
- if (dev_alloc_name(dev,"fddi%d") < 0) {
+ if (dev_alloc_name(dev, "fddi%d") < 0) {
printk(KERN_WARNING "i2o_lan: Too many FDDI devices.\n");
kfree(dev);
return NULL;
}
type_trans = fddi_type_trans;
unregister_dev = (void *)unregister_netdevice;
-
+
fddi_setup(dev);
register_netdev(dev);
- }
+ }
break;
#endif
-#ifdef CONFIG_FIBRE_CHANNEL
+#ifdef CONFIG_NET_FC
case I2O_LAN_FIBRE_CHANNEL:
- printk(KERN_INFO "i2o_lan: Fibre Channel not yet supported.\n");
- break;
+ dev = init_fcdev(NULL, sizeof(struct i2o_lan_local));
+ if (dev == NULL)
+ return NULL;
+ type_trans = NULL;
+/* FIXME: Move fc_type_trans() from drivers/net/fc/iph5526.c to net/802/fc.c
+ * and export it in include/linux/fcdevice.h
+ * type_trans = fc_type_trans;
+ */
+ unregister_dev = (void *)unregister_fcdev;
+ break;
#endif
case I2O_LAN_UNKNOWN:
default:
- printk(KERN_ERR "i2o_lan: LAN type 0x%08X not supported.\n",
- i2o_dev->lct_data->sub_class);
+ printk(KERN_ERR "i2o_lan: LAN type 0x%04x not supported.\n",
+ i2o_dev->lct_data.sub_class);
return NULL;
}
priv = (struct i2o_lan_local *)dev->priv;
priv->i2o_dev = i2o_dev;
priv->type_trans = type_trans;
- priv->bucket_count = 0;
priv->sgl_max = (i2o_dev->controller->inbound_size - 16) / 12;
+ atomic_set(&priv->buckets_out, 0);
+
+ /* Set default values for user configurable parameters */
+ /* Private values are changed via /proc file system */
+
+ priv->max_buckets_out = max_buckets_out;
+ priv->bucket_thresh = bucket_thresh;
+ priv->rx_copybreak = rx_copybreak;
+ priv->tx_batch_mode = tx_batch_mode;
+ priv->i2o_event_mask = i2o_event_mask;
+
+ priv->tx_lock = SPIN_LOCK_UNLOCKED;
+ priv->fbl_lock = SPIN_LOCK_UNLOCKED;
unit++;
i2o_landevs[unit] = dev;
priv->unit = unit;
- if (i2o_query_scalar(i2o_dev->controller, i2o_dev->lct_data->tid,
+ if (i2o_query_scalar(i2o_dev->controller, i2o_dev->lct_data.tid,
0x0001, 0, &hw_addr, sizeof(hw_addr)) < 0) {
- printk(KERN_ERR "%s: Unable to query hardware address.\n", dev->name);
+ printk(KERN_ERR "%s: Unable to query hardware address.\n", dev->name);
unit--;
unregister_dev(dev);
kfree(dev);
- return NULL;
+ return NULL;
}
- dprintk("%s: hwaddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
- dev->name,hw_addr[0], hw_addr[1], hw_addr[2], hw_addr[3],
- hw_addr[4], hw_addr[5]);
+ dprintk(KERN_DEBUG "%s: hwaddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
+ dev->name, hw_addr[0], hw_addr[1], hw_addr[2], hw_addr[3],
+ hw_addr[4], hw_addr[5]);
dev->addr_len = 6;
memcpy(dev->dev_addr, hw_addr, 6);
- if (i2o_query_scalar(i2o_dev->controller, i2o_dev->lct_data->tid,
- 0x0007, 2, &tx_max_out, sizeof(tx_max_out)) < 0)
- {
- printk(KERN_ERR "%s: Unable to query max TX queue.\n", dev->name);
- unit--;
- unregister_dev(dev);
- kfree(dev);
- return NULL;
- }
- dprintk(KERN_INFO "%s: Max TX Outstanding = %d.\n", dev->name, tx_max_out);
- priv->tx_max_out = tx_max_out;
- priv->tx_out = 0;
- priv->tx_count = 0;
- priv->lock = SPIN_LOCK_UNLOCKED;
+ if (i2o_query_scalar(i2o_dev->controller, i2o_dev->lct_data.tid,
+ 0x0007, 2, &tx_max_out, sizeof(tx_max_out)) < 0) {
+ printk(KERN_ERR "%s: Unable to query max TX queue.\n", dev->name);
+ unit--;
+ unregister_dev(dev);
+ kfree(dev);
+ return NULL;
+ }
+ dprintk(KERN_INFO "%s: Max TX Outstanding = %d.\n", dev->name, tx_max_out);
+ priv->tx_max_out = tx_max_out;
+ atomic_set(&priv->tx_out, 0);
+ priv->tx_count = 0;
priv->i2o_batch_send_task.next = NULL;
priv->i2o_batch_send_task.sync = 0;
priv->i2o_batch_send_task.routine = (void *)i2o_lan_batch_send;
priv->i2o_batch_send_task.data = (void *)dev;
- dev->open = i2o_lan_open;
- dev->stop = i2o_lan_close;
- dev->hard_start_xmit = i2o_lan_packet_send;
- dev->get_stats = i2o_lan_get_stats;
+ dev->open = i2o_lan_open;
+ dev->stop = i2o_lan_close;
+ dev->get_stats = i2o_lan_get_stats;
dev->set_multicast_list = i2o_lan_set_multicast_list;
- dev->change_mtu = i2o_lan_change_mtu;
+ dev->tx_timeout = i2o_lan_tx_timeout;
+ dev->watchdog_timeo = I2O_LAN_TX_TIMEOUT;
+
+#ifdef CONFIG_NET_FC
+ if (i2o_dev->lct_data.sub_class == I2O_LAN_FIBRE_CHANNEL)
+ dev->hard_start_xmit = i2o_lan_sdu_send;
+ else
+#endif
+ dev->hard_start_xmit = i2o_lan_packet_send;
+
+ if (i2o_dev->lct_data.sub_class == I2O_LAN_ETHERNET)
+ dev->change_mtu = i2o_lan_change_mtu;
return dev;
}
@@ -1032,19 +1417,35 @@
struct net_device *dev;
int i;
- printk(KERN_INFO "Linux I2O LAN OSM (c) 1999 University of Helsinki.\n");
+ printk(KERN_INFO "I2O LAN OSM (c) 1999 University of Helsinki.\n");
+
+ /* Module params used as global defaults for private values */
+
+ if (max_buckets_out > I2O_LAN_MAX_BUCKETS_OUT)
+ max_buckets_out = I2O_LAN_MAX_BUCKETS_OUT;
+ if (bucket_thresh > max_buckets_out)
+ bucket_thresh = max_buckets_out;
+
+ /* Install handlers for incoming replies */
+
+ if (i2o_install_handler(&i2o_lan_send_handler) < 0) {
+ printk(KERN_ERR "i2o_lan: Unable to register I2O LAN OSM.\n");
+ return -EINVAL;
+ }
+ lan_send_context = i2o_lan_send_handler.context;
- if (bucketpost > I2O_BUCKET_COUNT)
- bucketpost = I2O_BUCKET_COUNT;
- if (bucketthresh > bucketpost)
- bucketthresh = bucketpost;
+ if (i2o_install_handler(&i2o_lan_receive_handler) < 0) {
+ printk(KERN_ERR "i2o_lan: Unable to register I2O LAN OSM.\n");
+ return -EINVAL;
+ }
+ lan_receive_context = i2o_lan_receive_handler.context;
if (i2o_install_handler(&i2o_lan_handler) < 0) {
printk(KERN_ERR "i2o_lan: Unable to register I2O LAN OSM.\n");
return -EINVAL;
}
lan_context = i2o_lan_handler.context;
-
+
for(i=0; i <= MAX_LAN_CARDS; i++)
i2o_landevs[i] = NULL;
@@ -1052,15 +1453,16 @@
struct i2o_controller *iop = i2o_find_controller(i);
struct i2o_device *i2o_dev;
- if (iop==NULL) continue;
+ if (iop==NULL)
+ continue;
for (i2o_dev=iop->devices;i2o_dev != NULL;i2o_dev=i2o_dev->next) {
- if (i2o_dev->lct_data->class_id != I2O_CLASS_LAN)
+ if (i2o_dev->lct_data.class_id != I2O_CLASS_LAN)
continue;
/* Make sure device not already claimed by an ISM */
- if (i2o_dev->lct_data->user_tid != 0xFFF)
+ if (i2o_dev->lct_data.user_tid != 0xFFF)
continue;
if (unit == MAX_LAN_CARDS) {
@@ -1071,14 +1473,16 @@
dev = i2o_lan_register_device(i2o_dev);
if (dev == NULL) {
- printk(KERN_ERR "i2o_lan: Unable to register I2O LAN device.\n");
- continue; // try next one
+ printk(KERN_ERR "i2o_lan: Unable to register I2O LAN device 0x%04x.\n",
+ i2o_dev->lct_data.sub_class);
+ continue;
}
- printk(KERN_INFO "%s: I2O LAN device registered, tid = %d,"
- " subclass = 0x%08X, unit = %d.\n",
- dev->name, i2o_dev->lct_data->tid, i2o_dev->lct_data->sub_class,
- ((struct i2o_lan_local *)dev->priv)->unit);
+ printk(KERN_INFO "%s: I2O LAN device registered, "
+ "subclass = 0x%04x, unit = %d, tid = %d.\n",
+ dev->name, i2o_dev->lct_data.sub_class,
+ ((struct i2o_lan_local *)dev->priv)->unit,
+ i2o_dev->lct_data.tid);
}
i2o_unlock_controller(iop);
@@ -1097,45 +1501,55 @@
for (i = 0; i <= unit; i++) {
struct net_device *dev = i2o_landevs[i];
- struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
- struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
- switch (i2o_dev->lct_data->sub_class) {
+ switch (i2o_dev->lct_data.sub_class) {
case I2O_LAN_ETHERNET:
unregister_netdev(dev);
- kfree(dev);
break;
#ifdef CONFIG_FDDI
case I2O_LAN_FDDI:
unregister_netdevice(dev);
- kfree(dev);
break;
#endif
#ifdef CONFIG_TR
case I2O_LAN_TR:
unregister_trdev(dev);
- kfree(dev);
+ break;
+#endif
+#ifdef CONFIG_NET_FC
+ case I2O_LAN_FIBRE_CHANNEL:
+ unregister_fcdev(dev);
break;
#endif
default:
- printk(KERN_WARNING "i2o_lan: Spurious I2O LAN subclass 0x%08X.\n",
- i2o_dev->lct_data->sub_class);
+ printk(KERN_WARNING "%s: Spurious I2O LAN subclass 0x%08x.\n",
+ dev->name, i2o_dev->lct_data.sub_class);
}
dprintk(KERN_INFO "%s: I2O LAN device unregistered.\n",
dev->name);
+ kfree(dev);
}
i2o_remove_handler(&i2o_lan_handler);
+ i2o_remove_handler(&i2o_lan_send_handler);
+ i2o_remove_handler(&i2o_lan_receive_handler);
}
EXPORT_NO_SYMBOLS;
-MODULE_AUTHOR("Univ of Helsinki, CS Department");
+MODULE_AUTHOR("University of Helsinki, Department of Computer Science");
MODULE_DESCRIPTION("I2O Lan OSM");
-MODULE_PARM(bucketpost, "i"); // Total number of buckets to post
-MODULE_PARM(bucketthresh, "i"); // Bucket post threshold
-MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(max_buckets_out, "1-" __MODULE_STRING(I2O_LAN_MAX_BUCKETS_OUT) "i");
+MODULE_PARM_DESC(max_buckets_out, "Total number of buckets to post (1-)");
+MODULE_PARM(bucket_thresh, "1-" __MODULE_STRING(I2O_LAN_MAX_BUCKETS_OUT) "i");
+MODULE_PARM_DESC(bucket_thresh, "Bucket post threshold (1-)");
+MODULE_PARM(rx_copybreak, "1-" "i");
+MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy only small frames (1-)");
+MODULE_PARM(tx_batch_mode, "0-1" "i");
+MODULE_PARM_DESC(tx_batch_mode, "0=Use immediate mode send, 1=Use batch mode send");
#endif