From 6a68afe3a2971953e218e509b16eae0ece43f9ac Mon Sep 17 00:00:00 2001 From: Krzysztof Hałasa Date: Sat, 23 May 2009 23:14:10 +0200 Subject: IXP4xx: Ethernet and WAN drivers now support "high" hardware queues. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Krzysztof Hałasa --- drivers/net/arm/ixp4xx_eth.c | 15 +++++++++------ drivers/net/wan/ixp4xx_hss.c | 10 ++++++---- 2 files changed, 15 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index a740053d3af..d304c731c47 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c @@ -456,7 +456,8 @@ static inline void queue_put_desc(unsigned int queue, u32 phys, debug_desc(phys, desc); BUG_ON(phys & 0x1F); qmgr_put_entry(queue, phys); - BUG_ON(qmgr_stat_overflow(queue)); + /* Don't check for queue overflow here, we've allocated sufficient + length and queues >= 32 don't support this check anyway. */ } @@ -512,8 +513,8 @@ static int eth_poll(struct napi_struct *napi, int budget) #endif napi_complete(napi); qmgr_enable_irq(rxq); - if (!qmgr_stat_empty(rxq) && - napi_reschedule(napi)) { + if (!qmgr_stat_nearly_empty(rxq) && + napi_reschedule(napi)) { /* really empty in fact */ #if DEBUG_RX printk(KERN_DEBUG "%s: eth_poll" " napi_reschedule successed\n", @@ -630,7 +631,8 @@ static void eth_txdone_irq(void *unused) port->tx_buff_tab[n_desc] = NULL; } - start = qmgr_stat_empty(port->plat->txreadyq); + /* really empty in fact */ + start = qmgr_stat_nearly_empty(port->plat->txreadyq); queue_put_desc(port->plat->txreadyq, phys, desc); if (start) { #if DEBUG_TX @@ -708,13 +710,14 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev) queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc); dev->trans_start = jiffies; - if (qmgr_stat_empty(txreadyq)) { + if (qmgr_stat_nearly_empty(txreadyq)) { /* really empty in fact */ #if DEBUG_TX printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name); #endif netif_stop_queue(dev); /* we could miss TX ready interrupt */ - if (!qmgr_stat_empty(txreadyq)) { + /* really empty in fact */ + if (!qmgr_stat_nearly_empty(txreadyq)) { #if DEBUG_TX printk(KERN_DEBUG "%s: eth_xmit ready again\n", dev->name); diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index 765a7f5d6aa..1e56e58c660 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -579,7 +579,8 @@ static inline void queue_put_desc(unsigned int queue, u32 phys, debug_desc(phys, desc); BUG_ON(phys & 0x1F); qmgr_put_entry(queue, phys); - BUG_ON(qmgr_stat_overflow(queue)); + /* Don't check for queue overflow here, we've allocated sufficient + length and queues >= 32 don't support this check anyway. 
*/ } @@ -789,7 +790,8 @@ static void hss_hdlc_txdone_irq(void *pdev) free_buffer_irq(port->tx_buff_tab[n_desc]); port->tx_buff_tab[n_desc] = NULL; - start = qmgr_stat_empty(port->plat->txreadyq); + /* really empty in fact */ + start = qmgr_stat_nearly_empty(port->plat->txreadyq); queue_put_desc(port->plat->txreadyq, tx_desc_phys(port, n_desc), desc); if (start) { @@ -867,13 +869,13 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev) queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc); dev->trans_start = jiffies; - if (qmgr_stat_empty(txreadyq)) { + if (qmgr_stat_nearly_empty(txreadyq)) { /* really empty in fact */ #if DEBUG_TX printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name); #endif netif_stop_queue(dev); /* we could miss TX ready interrupt */ - if (!qmgr_stat_empty(txreadyq)) { + if (!qmgr_stat_nearly_empty(txreadyq)) { #if DEBUG_TX printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n", dev->name); -- cgit v1.2.3 From 2e418400728a9fcacb2ab75f0547584a56b8a584 Mon Sep 17 00:00:00 2001 From: Krzysztof Hałasa Date: Sat, 23 May 2009 23:14:59 +0200 Subject: IXP4xx: Whitespace fixes in the Ethernet driver. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Krzysztof Hałasa --- drivers/net/arm/ixp4xx_eth.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index d304c731c47..672c9626b9c 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c @@ -817,29 +817,29 @@ static int request_queues(struct port *port) int err; err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0, - "%s:RX-free", port->netdev->name); + "%s:RX-free", port->netdev->name); if (err) return err; err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0, - "%s:RX", port->netdev->name); + "%s:RX", port->netdev->name); if (err) goto rel_rxfree; err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0, - "%s:TX", port->netdev->name); + "%s:TX", port->netdev->name); if (err) goto rel_rx; err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0, - "%s:TX-ready", port->netdev->name); + "%s:TX-ready", port->netdev->name); if (err) goto rel_tx; /* TX-done queue handles skbs sent out by the NPEs */ if (!ports_open) { err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0, - "%s:TX-done", DRV_NAME); + "%s:TX-done", DRV_NAME); if (err) goto rel_txready; } -- cgit v1.2.3 From 9733bb8e9ce0078f55405ce470a62ec0a551fe99 Mon Sep 17 00:00:00 2001 From: Krzysztof Hałasa Date: Mon, 25 May 2009 13:25:34 +0200 Subject: IXP4xx: Change QMgr function names to qmgr_stat_*_watermark and clean the comments. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Krzysztof Hałasa --- arch/arm/mach-ixp4xx/include/mach/qmgr.h | 22 +++++++++++----------- drivers/net/arm/ixp4xx_eth.c | 13 ++++++------- drivers/net/wan/ixp4xx_hss.c | 9 ++++----- 3 files changed, 21 insertions(+), 23 deletions(-) (limited to 'drivers/net') diff --git a/arch/arm/mach-ixp4xx/include/mach/qmgr.h b/arch/arm/mach-ixp4xx/include/mach/qmgr.h index 32077e11f17..9e7cad2d54c 100644 --- a/arch/arm/mach-ixp4xx/include/mach/qmgr.h +++ b/arch/arm/mach-ixp4xx/include/mach/qmgr.h @@ -138,12 +138,12 @@ static inline int qmgr_stat_empty(unsigned int queue) } /** - * qmgr_stat_empty() - checks if a hardware queue is nearly empty + * qmgr_stat_below_low_watermark() - checks if a queue is below low watermark * @queue: queue number * - * Returns non-zero value if the queue is nearly or completely empty. + * Returns non-zero value if the queue is below low watermark. */ -static inline int qmgr_stat_nearly_empty(unsigned int queue) +static inline int qmgr_stat_below_low_watermark(unsigned int queue) { extern struct qmgr_regs __iomem *qmgr_regs; if (queue >= HALF_QUEUES) @@ -153,19 +153,19 @@ static inline int qmgr_stat_nearly_empty(unsigned int queue) } /** - * qmgr_stat_empty() - checks if a hardware queue is nearly full + * qmgr_stat_above_high_watermark() - checks if a queue is above high watermark * @queue: queue number * - * Returns non-zero value if the queue is nearly or completely full. + * Returns non-zero value if the queue is above high watermark */ -static inline int qmgr_stat_nearly_full(unsigned int queue) +static inline int qmgr_stat_above_high_watermark(unsigned int queue) { BUG_ON(queue >= HALF_QUEUES); return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_FULL; } /** - * qmgr_stat_empty() - checks if a hardware queue is full + * qmgr_stat_full() - checks if a hardware queue is full * @queue: queue number * * Returns non-zero value if the queue is full. @@ -180,10 +180,10 @@ static inline int qmgr_stat_full(unsigned int queue) } /** - * qmgr_stat_empty() - checks if a hardware queue experienced underflow + * qmgr_stat_underflow() - checks if a hardware queue experienced underflow * @queue: queue number * - * Returns non-zero value if empty. + * Returns non-zero value if the queue experienced underflow. */ static inline int qmgr_stat_underflow(unsigned int queue) { @@ -191,10 +191,10 @@ static inline int qmgr_stat_underflow(unsigned int queue) } /** - * qmgr_stat_empty() - checks if a hardware queue experienced overflow + * qmgr_stat_overflow() - checks if a hardware queue experienced overflow * @queue: queue number * - * Returns non-zero value if empty. + * Returns non-zero value if the queue experienced overflow. 
*/ static inline int qmgr_stat_overflow(unsigned int queue) { diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index 672c9626b9c..b6d188115ca 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c @@ -513,8 +513,8 @@ static int eth_poll(struct napi_struct *napi, int budget) #endif napi_complete(napi); qmgr_enable_irq(rxq); - if (!qmgr_stat_nearly_empty(rxq) && - napi_reschedule(napi)) { /* really empty in fact */ + if (!qmgr_stat_below_low_watermark(rxq) && + napi_reschedule(napi)) { /* not empty again */ #if DEBUG_RX printk(KERN_DEBUG "%s: eth_poll" " napi_reschedule successed\n", @@ -631,10 +631,9 @@ static void eth_txdone_irq(void *unused) port->tx_buff_tab[n_desc] = NULL; } - /* really empty in fact */ - start = qmgr_stat_nearly_empty(port->plat->txreadyq); + start = qmgr_stat_below_low_watermark(port->plat->txreadyq); queue_put_desc(port->plat->txreadyq, phys, desc); - if (start) { + if (start) { /* TX-ready queue was empty */ #if DEBUG_TX printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n", port->netdev->name); @@ -710,14 +709,14 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev) queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc); dev->trans_start = jiffies; - if (qmgr_stat_nearly_empty(txreadyq)) { /* really empty in fact */ + if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */ #if DEBUG_TX printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name); #endif netif_stop_queue(dev); /* we could miss TX ready interrupt */ /* really empty in fact */ - if (!qmgr_stat_nearly_empty(txreadyq)) { + if (!qmgr_stat_below_low_watermark(txreadyq)) { #if DEBUG_TX printk(KERN_DEBUG "%s: eth_xmit ready again\n", dev->name); diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index 1e56e58c660..a6dc317083d 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -790,11 +790,10 @@ static void hss_hdlc_txdone_irq(void *pdev) free_buffer_irq(port->tx_buff_tab[n_desc]); port->tx_buff_tab[n_desc] = NULL; - /* really empty in fact */ - start = qmgr_stat_nearly_empty(port->plat->txreadyq); + start = qmgr_stat_below_low_watermark(port->plat->txreadyq); queue_put_desc(port->plat->txreadyq, tx_desc_phys(port, n_desc), desc); - if (start) { + if (start) { /* TX-ready queue was empty */ #if DEBUG_TX printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit" " ready\n", dev->name); @@ -869,13 +868,13 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev) queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc); dev->trans_start = jiffies; - if (qmgr_stat_nearly_empty(txreadyq)) { /* really empty in fact */ + if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */ #if DEBUG_TX printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name); #endif netif_stop_queue(dev); /* we could miss TX ready interrupt */ - if (!qmgr_stat_nearly_empty(txreadyq)) { + if (!qmgr_stat_below_low_watermark(txreadyq)) { #if DEBUG_TX printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n", dev->name); -- cgit v1.2.3 From ab6bf42e2339580b5d87746d0ff4da4b1578b03e Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Wed, 27 May 2009 14:38:34 -0700 Subject: mlx4_core: Add module parameter for number of MTTs per segment The current MTT allocator uses kmalloc() to allocate a buffer for its buddy allocator, and thus is limited in the amount of MTT segments that it can control. As a result, the size of memory that can be registered is limited too. 
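As a rough illustration of the scaling (the snippet below is a standalone sketch, not code from this patch; the segment count, the 4 KB page size and the default of 8 MTT entries per segment are assumed values): registerable memory is roughly num_segments * entries_per_segment * page_size, since each MTT entry maps one page, so raising the per-segment entry count from 8 (log2 = 3) to 32 (log2 = 5) quadruples the reachable size without enlarging the kmalloc()'ed buddy bitmap.

#include <stdio.h>

/* Standalone sketch, not mlx4 driver code: upper bound on registerable
 * memory for a fixed number of MTT segments.  Each MTT entry maps one
 * page, so the bound is num_segs * 2^log_mtts_per_seg * page_size.
 */
static unsigned long long max_reg_bytes(unsigned long long num_segs,
                                        unsigned int log_mtts_per_seg,
                                        unsigned long long page_size)
{
        return num_segs * (1ULL << log_mtts_per_seg) * page_size;
}

int main(void)
{
        unsigned long long segs = 1ULL << 20;   /* hypothetical segment count */

        /* 8 entries/segment (log2 = 3) vs. the new maximum of 32 (log2 = 5) */
        printf("%llu GB -> %llu GB\n",
               max_reg_bytes(segs, 3, 4096) >> 30,
               max_reg_bytes(segs, 5, 4096) >> 30);
        return 0;
}
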
This patch uses a module parameter to control the number of MTT entries that each segment represents, allowing more memory to be registered with the same number of segments. Signed-off-by: Eli Cohen Signed-off-by: Roland Dreier --- drivers/net/mlx4/main.c | 14 ++++++++++++-- drivers/net/mlx4/mr.c | 6 +++--- drivers/net/mlx4/profile.c | 2 +- include/linux/mlx4/device.h | 1 + 4 files changed, 17 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 30bea968969..018348c0119 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c @@ -100,6 +100,10 @@ module_param_named(use_prio, use_prio, bool, 0444); MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " "(0/1, default 0)"); +static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); +module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); +MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)"); + int mlx4_check_port_params(struct mlx4_dev *dev, enum mlx4_port_type *port_type) { @@ -203,12 +207,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.max_cqes = dev_cap->max_cq_sz - 1; dev->caps.reserved_cqs = dev_cap->reserved_cqs; dev->caps.reserved_eqs = dev_cap->reserved_eqs; + dev->caps.mtts_per_seg = 1 << log_mtts_per_seg; dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts, - MLX4_MTT_ENTRY_PER_SEG); + dev->caps.mtts_per_seg); dev->caps.reserved_mrws = dev_cap->reserved_mrws; dev->caps.reserved_uars = dev_cap->reserved_uars; dev->caps.reserved_pds = dev_cap->reserved_pds; - dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz; + dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; dev->caps.max_msg_sz = dev_cap->max_msg_sz; dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); dev->caps.flags = dev_cap->flags; @@ -1304,6 +1309,11 @@ static int __init mlx4_verify_params(void) return -1; } + if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) { + printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); + return -1; + } + return 0; } diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index 0caf74cae8b..3b8973d1993 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c @@ -209,7 +209,7 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, } else mtt->page_shift = page_shift; - for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1) + for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1) ++mtt->order; mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order); @@ -350,7 +350,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | MLX4_MPT_PD_FLAG_RAE); mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) * - MLX4_MTT_ENTRY_PER_SEG); + dev->caps.mtts_per_seg); } else { mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); } @@ -391,7 +391,7 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64))) return -EINVAL; - if (start_index & (MLX4_MTT_ENTRY_PER_SEG - 1)) + if (start_index & (dev->caps.mtts_per_seg - 1)) return -EINVAL; mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg + diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c index cebdf3243ca..bd22df95adf 100644 --- a/drivers/net/mlx4/profile.c +++ b/drivers/net/mlx4/profile.c @@ -98,7 +98,7 
@@ u64 mlx4_make_profile(struct mlx4_dev *dev, profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz; profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz; profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz; - profile[MLX4_RES_MTT].size = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz; + profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE; profile[MLX4_RES_QP].num = request->num_qp; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 3aff8a6a389..ce7cc6c7bcb 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -210,6 +210,7 @@ struct mlx4_caps { int num_comp_vectors; int num_mpts; int num_mtt_segs; + int mtts_per_seg; int fmr_reserved_mtts; int reserved_mtts; int reserved_mrws; -- cgit v1.2.3 From 80153d1bcc6a20361d5974f37d3729583ba99154 Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Tue, 12 May 2009 19:37:20 +0000 Subject: [ARM] pxa/stargate2: Add board specific elements to the smc91x driver Signed-off-by: Jonathan Cameron Signed-off-by: Eric Miao --- drivers/net/smc91x.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index 329f890e290..f1f773b17fe 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -45,7 +45,8 @@ defined(CONFIG_MACH_ZYLONITE) ||\ defined(CONFIG_MACH_LITTLETON) ||\ defined(CONFIG_MACH_ZYLONITE2) ||\ - defined(CONFIG_ARCH_VIPER) + defined(CONFIG_ARCH_VIPER) ||\ + defined(CONFIG_MACH_STARGATE2) #include @@ -73,7 +74,7 @@ /* We actually can't write halfwords properly if not word aligned */ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) { - if (machine_is_mainstone() && reg & 2) { + if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) { unsigned int v = val << 16; v |= readl(ioaddr + (reg & ~2)) & 0xffff; writel(v, ioaddr + (reg & ~2)); -- cgit v1.2.3 From fdd7b4c3302c93f6833e338903ea77245eb510b4 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 9 Jun 2009 04:01:02 -0700 Subject: r8169: fix crash when large packets are received Michael Tokarev reported that receiving a large packet could crash a machine with an RTL8169 NIC (original thread at http://lkml.org/lkml/2009/6/8/192). The problem is that the driver tells the NIC that frames up to 16383 bytes can be received, but fills the RX ring with skbs allocated at smaller sizes (1536 bytes when the standard 1500-byte MTU is used). When a frame larger than the allocated buffer is received, the DMA transfer can run past the end of the buffer and corrupt kernel memory. The fix is to tell the NIC the maximum frame size that matches the allocated receive buffers. This bug is very old (it predates git, linux-2.6.10) and should be backported to stable versions. Reported-by: Michael Tokarev Signed-off-by: Eric Dumazet Tested-by: Michael Tokarev Signed-off-by: David S. Miller --- drivers/net/r8169.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 8247a945a1d..3b19e0ce290 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -66,7 +66,6 @@ static const int multicast_filter_limit = 32; #define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ #define EarlyTxThld 0x3F /* 0x3F means NO early transmit */ -#define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... */ #define SafeMtu 0x1c20 /* ...
actually life sucks beyond ~7k */ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ @@ -2357,10 +2356,10 @@ static u16 rtl_rw_cpluscmd(void __iomem *ioaddr) return cmd; } -static void rtl_set_rx_max_size(void __iomem *ioaddr) +static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz) { /* Low hurts. Let's disable the filtering. */ - RTL_W16(RxMaxSize, 16383); + RTL_W16(RxMaxSize, rx_buf_sz); } static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) @@ -2407,7 +2406,7 @@ static void rtl_hw_start_8169(struct net_device *dev) RTL_W8(EarlyTxThres, EarlyTxThld); - rtl_set_rx_max_size(ioaddr); + rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz); if ((tp->mac_version == RTL_GIGA_MAC_VER_01) || (tp->mac_version == RTL_GIGA_MAC_VER_02) || @@ -2668,7 +2667,7 @@ static void rtl_hw_start_8168(struct net_device *dev) RTL_W8(EarlyTxThres, EarlyTxThld); - rtl_set_rx_max_size(ioaddr); + rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz); tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1; @@ -2846,7 +2845,7 @@ static void rtl_hw_start_8101(struct net_device *dev) RTL_W8(EarlyTxThres, EarlyTxThld); - rtl_set_rx_max_size(ioaddr); + rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz); tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; -- cgit v1.2.3 From 4edd473f208cff77ce1f7ef26d5a41f31fa198e0 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 8 Jun 2009 18:14:42 -0700 Subject: [SCSI] bnx2: Add support for CNIC driver. Add interface and functions to support a new CNIC driver to drive the Broadcom bnx2 hardware for iSCSI offload. Signed-off-by: Michael Chan Acked-by: David S. Miller Signed-off-by: James Bottomley --- drivers/net/bnx2.c | 193 ++++++++++++++++++++++++++++++++++++++++++++++++++++- drivers/net/bnx2.h | 18 +++++ 2 files changed, 208 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index b0cb29d4cc0..3f5fcb0156a 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -49,6 +49,10 @@ #include #include +#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) +#define BCM_CNIC 1 +#include "cnic_if.h" +#endif #include "bnx2.h" #include "bnx2_fw.h" @@ -315,6 +319,158 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val) spin_unlock_bh(&bp->indirect_lock); } +#ifdef BCM_CNIC +static int +bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info) +{ + struct bnx2 *bp = netdev_priv(dev); + struct drv_ctl_io *io = &info->data.io; + + switch (info->cmd) { + case DRV_CTL_IO_WR_CMD: + bnx2_reg_wr_ind(bp, io->offset, io->data); + break; + case DRV_CTL_IO_RD_CMD: + io->data = bnx2_reg_rd_ind(bp, io->offset); + break; + case DRV_CTL_CTX_WR_CMD: + bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data); + break; + default: + return -EINVAL; + } + return 0; +} + +static void bnx2_setup_cnic_irq_info(struct bnx2 *bp) +{ + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; + int sb_id; + + if (bp->flags & BNX2_FLAG_USING_MSIX) { + cp->drv_state |= CNIC_DRV_STATE_USING_MSIX; + bnapi->cnic_present = 0; + sb_id = bp->irq_nvecs; + cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX; + } else { + cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; + bnapi->cnic_tag = bnapi->last_status_idx; + bnapi->cnic_present = 1; + sb_id = 0; + cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; + } + + cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector; + cp->irq_arr[0].status_blk = (void *) + ((unsigned long) bnapi->status_blk.msi + + (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id)); + 
cp->irq_arr[0].status_blk_num = sb_id; + cp->num_irq = 1; +} + +static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops, + void *data) +{ + struct bnx2 *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + if (ops == NULL) + return -EINVAL; + + if (cp->drv_state & CNIC_DRV_STATE_REGD) + return -EBUSY; + + bp->cnic_data = data; + rcu_assign_pointer(bp->cnic_ops, ops); + + cp->num_irq = 0; + cp->drv_state = CNIC_DRV_STATE_REGD; + + bnx2_setup_cnic_irq_info(bp); + + return 0; +} + +static int bnx2_unregister_cnic(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + cp->drv_state = 0; + bnapi->cnic_present = 0; + rcu_assign_pointer(bp->cnic_ops, NULL); + synchronize_rcu(); + return 0; +} + +struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + cp->drv_owner = THIS_MODULE; + cp->chip_id = bp->chip_id; + cp->pdev = bp->pdev; + cp->io_base = bp->regview; + cp->drv_ctl = bnx2_drv_ctl; + cp->drv_register_cnic = bnx2_register_cnic; + cp->drv_unregister_cnic = bnx2_unregister_cnic; + + return cp; +} +EXPORT_SYMBOL(bnx2_cnic_probe); + +static void +bnx2_cnic_stop(struct bnx2 *bp) +{ + struct cnic_ops *c_ops; + struct cnic_ctl_info info; + + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) { + info.cmd = CNIC_CTL_STOP_CMD; + c_ops->cnic_ctl(bp->cnic_data, &info); + } + rcu_read_unlock(); +} + +static void +bnx2_cnic_start(struct bnx2 *bp) +{ + struct cnic_ops *c_ops; + struct cnic_ctl_info info; + + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) { + if (!(bp->flags & BNX2_FLAG_USING_MSIX)) { + struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; + + bnapi->cnic_tag = bnapi->last_status_idx; + } + info.cmd = CNIC_CTL_START_CMD; + c_ops->cnic_ctl(bp->cnic_data, &info); + } + rcu_read_unlock(); +} + +#else + +static void +bnx2_cnic_stop(struct bnx2 *bp) +{ +} + +static void +bnx2_cnic_start(struct bnx2 *bp) +{ +} + +#endif + static int bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val) { @@ -488,6 +644,7 @@ bnx2_napi_enable(struct bnx2 *bp) static void bnx2_netif_stop(struct bnx2 *bp) { + bnx2_cnic_stop(bp); bnx2_disable_int_sync(bp); if (netif_running(bp->dev)) { bnx2_napi_disable(bp); @@ -504,6 +661,7 @@ bnx2_netif_start(struct bnx2 *bp) netif_tx_wake_all_queues(bp->dev); bnx2_napi_enable(bp); bnx2_enable_int(bp); + bnx2_cnic_start(bp); } } } @@ -3164,6 +3322,11 @@ bnx2_has_work(struct bnx2_napi *bnapi) if (bnx2_has_fast_work(bnapi)) return 1; +#ifdef BCM_CNIC + if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx)) + return 1; +#endif + if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) != (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS)) return 1; @@ -3193,6 +3356,23 @@ bnx2_chk_missed_msi(struct bnx2 *bp) bp->idle_chk_status_idx = bnapi->last_status_idx; } +#ifdef BCM_CNIC +static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi) +{ + struct cnic_ops *c_ops; + + if (!bnapi->cnic_present) + return; + + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) + bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data, + bnapi->status_blk.msi); + rcu_read_unlock(); +} +#endif + static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi) { struct status_block *sblk = bnapi->status_blk.msi; @@ -3267,6 +3447,10 @@ static int bnx2_poll(struct napi_struct *napi, int budget) work_done = 
bnx2_poll_work(bp, bnapi, work_done, budget); +#ifdef BCM_CNIC + bnx2_poll_cnic(bp, bnapi); +#endif + /* bnapi->last_status_idx is used below to tell the hw how * much work has been processed, so we must read it before * checking for more work. @@ -4632,8 +4816,11 @@ bnx2_init_chip(struct bnx2 *bp) val = REG_RD(bp, BNX2_MQ_CONFIG); val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; - if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1) - val |= BNX2_MQ_CONFIG_HALT_DIS; + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + val |= BNX2_MQ_CONFIG_BIN_MQ_MODE; + if (CHIP_REV(bp) == CHIP_REV_Ax) + val |= BNX2_MQ_CONFIG_HALT_DIS; + } REG_WR(bp, BNX2_MQ_CONFIG, val); @@ -7471,7 +7658,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) INIT_WORK(&bp->reset_task, bnx2_reset_task); dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); - mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS); + mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1); dev->mem_end = dev->mem_start + mem_len; dev->irq = pdev->irq; diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index 5b570e17c83..a1ff739bc9b 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h @@ -361,6 +361,9 @@ struct l2_fhdr { #define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28) #define BNX2_L2CTX_HOST_BDIDX 0x00000004 +#define BNX2_L2CTX_STATUSB_NUM_SHIFT 16 +#define BNX2_L2CTX_STATUSB_NUM(sb_id) \ + (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0) #define BNX2_L2CTX_HOST_BSEQ 0x00000008 #define BNX2_L2CTX_NX_BSEQ 0x0000000c #define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010 @@ -5900,6 +5903,7 @@ struct l2_fhdr { #define BNX2_RXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) #define BNX2_RXP_SCRATCH 0x000e0000 +#define BNX2_RXP_SCRATCH_RXP_FLOOD 0x000e0024 #define BNX2_RXP_SCRATCH_RSS_TBL_SZ 0x000e0038 #define BNX2_RXP_SCRATCH_RSS_TBL 0x000e003c #define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES 128 @@ -6678,6 +6682,11 @@ struct bnx2_napi { u32 last_status_idx; u32 int_num; +#ifdef BCM_CNIC + u32 cnic_tag; + int cnic_present; +#endif + struct bnx2_rx_ring_info rx_ring; struct bnx2_tx_ring_info tx_ring; }; @@ -6727,6 +6736,11 @@ struct bnx2 { int tx_ring_size; u32 tx_wake_thresh; +#ifdef BCM_CNIC + struct cnic_ops *cnic_ops; + void *cnic_data; +#endif + /* End of fields used in the performance code paths. */ unsigned int current_interval; @@ -6885,6 +6899,10 @@ struct bnx2 { u32 idle_chk_status_idx; +#ifdef BCM_CNIC + struct cnic_eth_dev cnic_eth_dev; +#endif + const struct firmware *mips_firmware; const struct firmware *rv2p_firmware; }; -- cgit v1.2.3 From a463696039f7097ce87c21db3cf5c16cdcb3850d Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 8 Jun 2009 18:14:43 -0700 Subject: [SCSI] cnic: Add new Broadcom CNIC driver. The CNIC driver controls BNX2 hardware rings and resources used by iSCSI. Most hardware resources for iSCSI are separate from those used for ethernet networking. iSCSI uses a separate MAC address and IP address. The CNIC driver creates a UIO interface to handle the non-offloaded packets such as ARP, etc in userspace. Signed-off-by: Michael Chan Acked-by: David S. 
Miller Signed-off-by: James Bottomley --- drivers/net/Kconfig | 11 + drivers/net/Makefile | 1 + drivers/net/cnic.c | 2711 +++++++++++++++++++++++++++++++++++++++++++++++ drivers/net/cnic.h | 299 ++++++ drivers/net/cnic_defs.h | 580 ++++++++++ drivers/net/cnic_if.h | 299 ++++++ 6 files changed, 3901 insertions(+) create mode 100644 drivers/net/cnic.c create mode 100644 drivers/net/cnic.h create mode 100644 drivers/net/cnic_defs.h create mode 100644 drivers/net/cnic_if.h (limited to 'drivers/net') diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 214a92d1ef7..f3c4a3b910b 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2264,6 +2264,17 @@ config BNX2 To compile this driver as a module, choose M here: the module will be called bnx2. This is recommended. +config CNIC + tristate "Broadcom CNIC support" + depends on BNX2 + depends on UIO + help + This driver supports offload features of Broadcom NetXtremeII + gigabit Ethernet cards. + + To compile this driver as a module, choose M here: the module + will be called cnic. This is recommended. + config SPIDER_NET tristate "Spider Gigabit Ethernet driver" depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB) diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 1fc4602a6ff..e6f1f8c3f8d 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -73,6 +73,7 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o obj-$(CONFIG_FEALNX) += fealnx.o obj-$(CONFIG_TIGON3) += tg3.o obj-$(CONFIG_BNX2) += bnx2.o +obj-$(CONFIG_CNIC) += cnic.o obj-$(CONFIG_BNX2X) += bnx2x.o bnx2x-objs := bnx2x_main.o bnx2x_link.o spidernet-y += spider_net.o spider_net_ethtool.o diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c new file mode 100644 index 00000000000..8d740376bbd --- /dev/null +++ b/drivers/net/cnic.c @@ -0,0 +1,2711 @@ +/* cnic.c: Broadcom CNIC core network driver. + * + * Copyright (c) 2006-2009 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com) + * Modified and maintained by: Michael Chan + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#define BCM_VLAN 1 +#endif +#include +#include +#include +#include +#include +#include + +#include "cnic_if.h" +#include "bnx2.h" +#include "cnic.h" +#include "cnic_defs.h" + +#define DRV_MODULE_NAME "cnic" +#define PFX DRV_MODULE_NAME ": " + +static char version[] __devinitdata = + "Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("Michael Chan and John(Zongxi) " + "Chen (zongxi@broadcom.com"); +MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(CNIC_MODULE_VERSION); + +static LIST_HEAD(cnic_dev_list); +static DEFINE_RWLOCK(cnic_dev_lock); +static DEFINE_MUTEX(cnic_lock); + +static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; + +static int cnic_service_bnx2(void *, void *); +static int cnic_ctl(void *, struct cnic_ctl_info *); + +static struct cnic_ops cnic_bnx2_ops = { + .cnic_owner = THIS_MODULE, + .cnic_handler = cnic_service_bnx2, + .cnic_ctl = cnic_ctl, +}; + +static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *); +static void cnic_init_bnx2_tx_ring(struct cnic_dev *); +static void cnic_init_bnx2_rx_ring(struct cnic_dev *); +static int cnic_cm_set_pg(struct cnic_sock *); + +static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) +{ + struct cnic_dev *dev = uinfo->priv; + struct cnic_local *cp = dev->cnic_priv; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (cp->uio_dev != -1) + return -EBUSY; + + cp->uio_dev = iminor(inode); + + cnic_shutdown_bnx2_rx_ring(dev); + + cnic_init_bnx2_tx_ring(dev); + cnic_init_bnx2_rx_ring(dev); + + return 0; +} + +static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode) +{ + struct cnic_dev *dev = uinfo->priv; + struct cnic_local *cp = dev->cnic_priv; + + cp->uio_dev = -1; + return 0; +} + +static inline void cnic_hold(struct cnic_dev *dev) +{ + atomic_inc(&dev->ref_count); +} + +static inline void cnic_put(struct cnic_dev *dev) +{ + atomic_dec(&dev->ref_count); +} + +static inline void csk_hold(struct cnic_sock *csk) +{ + atomic_inc(&csk->ref_count); +} + +static inline void csk_put(struct cnic_sock *csk) +{ + atomic_dec(&csk->ref_count); +} + +static struct cnic_dev *cnic_from_netdev(struct net_device *netdev) +{ + struct cnic_dev *cdev; + + read_lock(&cnic_dev_lock); + list_for_each_entry(cdev, &cnic_dev_list, list) { + if (netdev == cdev->netdev) { + cnic_hold(cdev); + read_unlock(&cnic_dev_lock); + return cdev; + } + } + read_unlock(&cnic_dev_lock); + return NULL; +} + +static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + struct drv_ctl_io *io = &info.data.io; + + info.cmd = DRV_CTL_CTX_WR_CMD; + io->cid_addr = cid_addr; + io->offset = off; + io->data = val; + ethdev->drv_ctl(dev->netdev, &info); +} + +static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + struct drv_ctl_io *io = &info.data.io; + + info.cmd = DRV_CTL_IO_WR_CMD; + io->offset = off; + io->data = val; + 
ethdev->drv_ctl(dev->netdev, &info); +} + +static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + struct drv_ctl_io *io = &info.data.io; + + info.cmd = DRV_CTL_IO_RD_CMD; + io->offset = off; + ethdev->drv_ctl(dev->netdev, &info); + return io->data; +} + +static int cnic_in_use(struct cnic_sock *csk) +{ + return test_bit(SK_F_INUSE, &csk->flags); +} + +static void cnic_kwq_completion(struct cnic_dev *dev, u32 count) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + + info.cmd = DRV_CTL_COMPLETION_CMD; + info.data.comp.comp_count = count; + ethdev->drv_ctl(dev->netdev, &info); +} + +static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, + struct cnic_sock *csk) +{ + struct iscsi_path path_req; + char *buf = NULL; + u16 len = 0; + u32 msg_type = ISCSI_KEVENT_IF_DOWN; + struct cnic_ulp_ops *ulp_ops; + + if (cp->uio_dev == -1) + return -ENODEV; + + if (csk) { + len = sizeof(path_req); + buf = (char *) &path_req; + memset(&path_req, 0, len); + + msg_type = ISCSI_KEVENT_PATH_REQ; + path_req.handle = (u64) csk->l5_cid; + if (test_bit(SK_F_IPV6, &csk->flags)) { + memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0], + sizeof(struct in6_addr)); + path_req.ip_addr_len = 16; + } else { + memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0], + sizeof(struct in_addr)); + path_req.ip_addr_len = 4; + } + path_req.vlan_id = csk->vlan_id; + path_req.pmtu = csk->mtu; + } + + rcu_read_lock(); + ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]); + if (ulp_ops) + ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len); + rcu_read_unlock(); + return 0; +} + +static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type, + char *buf, u16 len) +{ + int rc = -EINVAL; + + switch (msg_type) { + case ISCSI_UEVENT_PATH_UPDATE: { + struct cnic_local *cp; + u32 l5_cid; + struct cnic_sock *csk; + struct iscsi_path *path_resp; + + if (len < sizeof(*path_resp)) + break; + + path_resp = (struct iscsi_path *) buf; + cp = dev->cnic_priv; + l5_cid = (u32) path_resp->handle; + if (l5_cid >= MAX_CM_SK_TBL_SZ) + break; + + csk = &cp->csk_tbl[l5_cid]; + csk_hold(csk); + if (cnic_in_use(csk)) { + memcpy(csk->ha, path_resp->mac_addr, 6); + if (test_bit(SK_F_IPV6, &csk->flags)) + memcpy(&csk->src_ip[0], &path_resp->src.v6_addr, + sizeof(struct in6_addr)); + else + memcpy(&csk->src_ip[0], &path_resp->src.v4_addr, + sizeof(struct in_addr)); + if (is_valid_ether_addr(csk->ha)) + cnic_cm_set_pg(csk); + } + csk_put(csk); + rc = 0; + } + } + + return rc; +} + +static int cnic_offld_prep(struct cnic_sock *csk) +{ + if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) + return 0; + + if (!test_bit(SK_F_CONNECT_START, &csk->flags)) { + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + return 0; + } + + return 1; +} + +static int cnic_close_prep(struct cnic_sock *csk) +{ + clear_bit(SK_F_CONNECT_START, &csk->flags); + smp_mb__after_clear_bit(); + + if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { + while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) + msleep(1); + + return 1; + } + return 0; +} + +static int cnic_abort_prep(struct cnic_sock *csk) +{ + clear_bit(SK_F_CONNECT_START, &csk->flags); + smp_mb__after_clear_bit(); + + while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) + msleep(1); + + if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { + csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP; + return 1; + } + + return 0; +} + +int 
cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) +{ + struct cnic_dev *dev; + + if (ulp_type >= MAX_CNIC_ULP_TYPE) { + printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n", + ulp_type); + return -EINVAL; + } + mutex_lock(&cnic_lock); + if (cnic_ulp_tbl[ulp_type]) { + printk(KERN_ERR PFX "cnic_register_driver: Type %d has already " + "been registered\n", ulp_type); + mutex_unlock(&cnic_lock); + return -EBUSY; + } + + read_lock(&cnic_dev_lock); + list_for_each_entry(dev, &cnic_dev_list, list) { + struct cnic_local *cp = dev->cnic_priv; + + clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]); + } + read_unlock(&cnic_dev_lock); + + rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops); + mutex_unlock(&cnic_lock); + + /* Prevent race conditions with netdev_event */ + rtnl_lock(); + read_lock(&cnic_dev_lock); + list_for_each_entry(dev, &cnic_dev_list, list) { + struct cnic_local *cp = dev->cnic_priv; + + if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type])) + ulp_ops->cnic_init(dev); + } + read_unlock(&cnic_dev_lock); + rtnl_unlock(); + + return 0; +} + +int cnic_unregister_driver(int ulp_type) +{ + struct cnic_dev *dev; + + if (ulp_type >= MAX_CNIC_ULP_TYPE) { + printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n", + ulp_type); + return -EINVAL; + } + mutex_lock(&cnic_lock); + if (!cnic_ulp_tbl[ulp_type]) { + printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not " + "been registered\n", ulp_type); + goto out_unlock; + } + read_lock(&cnic_dev_lock); + list_for_each_entry(dev, &cnic_dev_list, list) { + struct cnic_local *cp = dev->cnic_priv; + + if (rcu_dereference(cp->ulp_ops[ulp_type])) { + printk(KERN_ERR PFX "cnic_unregister_driver: Type %d " + "still has devices registered\n", ulp_type); + read_unlock(&cnic_dev_lock); + goto out_unlock; + } + } + read_unlock(&cnic_dev_lock); + + rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL); + + mutex_unlock(&cnic_lock); + synchronize_rcu(); + return 0; + +out_unlock: + mutex_unlock(&cnic_lock); + return -EINVAL; +} + +static int cnic_start_hw(struct cnic_dev *); +static void cnic_stop_hw(struct cnic_dev *); + +static int cnic_register_device(struct cnic_dev *dev, int ulp_type, + void *ulp_ctx) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_ulp_ops *ulp_ops; + + if (ulp_type >= MAX_CNIC_ULP_TYPE) { + printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n", + ulp_type); + return -EINVAL; + } + mutex_lock(&cnic_lock); + if (cnic_ulp_tbl[ulp_type] == NULL) { + printk(KERN_ERR PFX "cnic_register_device: Driver with type %d " + "has not been registered\n", ulp_type); + mutex_unlock(&cnic_lock); + return -EAGAIN; + } + if (rcu_dereference(cp->ulp_ops[ulp_type])) { + printk(KERN_ERR PFX "cnic_register_device: Type %d has already " + "been registered to this device\n", ulp_type); + mutex_unlock(&cnic_lock); + return -EBUSY; + } + + clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]); + cp->ulp_handle[ulp_type] = ulp_ctx; + ulp_ops = cnic_ulp_tbl[ulp_type]; + rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops); + cnic_hold(dev); + + if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) + if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type])) + ulp_ops->cnic_start(cp->ulp_handle[ulp_type]); + + mutex_unlock(&cnic_lock); + + return 0; + +} +EXPORT_SYMBOL(cnic_register_driver); + +static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) +{ + struct cnic_local *cp = dev->cnic_priv; + + if (ulp_type >= MAX_CNIC_ULP_TYPE) { + printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n", + ulp_type); + return -EINVAL; 
+ } + mutex_lock(&cnic_lock); + if (rcu_dereference(cp->ulp_ops[ulp_type])) { + rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL); + cnic_put(dev); + } else { + printk(KERN_ERR PFX "cnic_unregister_device: device not " + "registered to this ulp type %d\n", ulp_type); + mutex_unlock(&cnic_lock); + return -EINVAL; + } + mutex_unlock(&cnic_lock); + + synchronize_rcu(); + + return 0; +} +EXPORT_SYMBOL(cnic_unregister_driver); + +static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id) +{ + id_tbl->start = start_id; + id_tbl->max = size; + id_tbl->next = 0; + spin_lock_init(&id_tbl->lock); + id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL); + if (!id_tbl->table) + return -ENOMEM; + + return 0; +} + +static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl) +{ + kfree(id_tbl->table); + id_tbl->table = NULL; +} + +static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id) +{ + int ret = -1; + + id -= id_tbl->start; + if (id >= id_tbl->max) + return ret; + + spin_lock(&id_tbl->lock); + if (!test_bit(id, id_tbl->table)) { + set_bit(id, id_tbl->table); + ret = 0; + } + spin_unlock(&id_tbl->lock); + return ret; +} + +/* Returns -1 if not successful */ +static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl) +{ + u32 id; + + spin_lock(&id_tbl->lock); + id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next); + if (id >= id_tbl->max) { + id = -1; + if (id_tbl->next != 0) { + id = find_first_zero_bit(id_tbl->table, id_tbl->next); + if (id >= id_tbl->next) + id = -1; + } + } + + if (id < id_tbl->max) { + set_bit(id, id_tbl->table); + id_tbl->next = (id + 1) & (id_tbl->max - 1); + id += id_tbl->start; + } + + spin_unlock(&id_tbl->lock); + + return id; +} + +static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id) +{ + if (id == -1) + return; + + id -= id_tbl->start; + if (id >= id_tbl->max) + return; + + clear_bit(id, id_tbl->table); +} + +static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma) +{ + int i; + + if (!dma->pg_arr) + return; + + for (i = 0; i < dma->num_pages; i++) { + if (dma->pg_arr[i]) { + pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE, + dma->pg_arr[i], dma->pg_map_arr[i]); + dma->pg_arr[i] = NULL; + } + } + if (dma->pgtbl) { + pci_free_consistent(dev->pcidev, dma->pgtbl_size, + dma->pgtbl, dma->pgtbl_map); + dma->pgtbl = NULL; + } + kfree(dma->pg_arr); + dma->pg_arr = NULL; + dma->num_pages = 0; +} + +static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) +{ + int i; + u32 *page_table = dma->pgtbl; + + for (i = 0; i < dma->num_pages; i++) { + /* Each entry needs to be in big endian format. 
*/ + *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); + page_table++; + *page_table = (u32) dma->pg_map_arr[i]; + page_table++; + } +} + +static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, + int pages, int use_pg_tbl) +{ + int i, size; + struct cnic_local *cp = dev->cnic_priv; + + size = pages * (sizeof(void *) + sizeof(dma_addr_t)); + dma->pg_arr = kzalloc(size, GFP_ATOMIC); + if (dma->pg_arr == NULL) + return -ENOMEM; + + dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages); + dma->num_pages = pages; + + for (i = 0; i < pages; i++) { + dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev, + BCM_PAGE_SIZE, + &dma->pg_map_arr[i]); + if (dma->pg_arr[i] == NULL) + goto error; + } + if (!use_pg_tbl) + return 0; + + dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) & + ~(BCM_PAGE_SIZE - 1); + dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size, + &dma->pgtbl_map); + if (dma->pgtbl == NULL) + goto error; + + cp->setup_pgtbl(dev, dma); + + return 0; + +error: + cnic_free_dma(dev, dma); + return -ENOMEM; +} + +static void cnic_free_resc(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int i = 0; + + if (cp->cnic_uinfo) { + cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); + while (cp->uio_dev != -1 && i < 15) { + msleep(100); + i++; + } + uio_unregister_device(cp->cnic_uinfo); + kfree(cp->cnic_uinfo); + cp->cnic_uinfo = NULL; + } + + if (cp->l2_buf) { + pci_free_consistent(dev->pcidev, cp->l2_buf_size, + cp->l2_buf, cp->l2_buf_map); + cp->l2_buf = NULL; + } + + if (cp->l2_ring) { + pci_free_consistent(dev->pcidev, cp->l2_ring_size, + cp->l2_ring, cp->l2_ring_map); + cp->l2_ring = NULL; + } + + for (i = 0; i < cp->ctx_blks; i++) { + if (cp->ctx_arr[i].ctx) { + pci_free_consistent(dev->pcidev, cp->ctx_blk_size, + cp->ctx_arr[i].ctx, + cp->ctx_arr[i].mapping); + cp->ctx_arr[i].ctx = NULL; + } + } + kfree(cp->ctx_arr); + cp->ctx_arr = NULL; + cp->ctx_blks = 0; + + cnic_free_dma(dev, &cp->gbl_buf_info); + cnic_free_dma(dev, &cp->conn_buf_info); + cnic_free_dma(dev, &cp->kwq_info); + cnic_free_dma(dev, &cp->kcq_info); + kfree(cp->iscsi_tbl); + cp->iscsi_tbl = NULL; + kfree(cp->ctx_tbl); + cp->ctx_tbl = NULL; + + cnic_free_id_tbl(&cp->cid_tbl); +} + +static int cnic_alloc_context(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + if (CHIP_NUM(cp) == CHIP_NUM_5709) { + int i, k, arr_size; + + cp->ctx_blk_size = BCM_PAGE_SIZE; + cp->cids_per_blk = BCM_PAGE_SIZE / 128; + arr_size = BNX2_MAX_CID / cp->cids_per_blk * + sizeof(struct cnic_ctx); + cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL); + if (cp->ctx_arr == NULL) + return -ENOMEM; + + k = 0; + for (i = 0; i < 2; i++) { + u32 j, reg, off, lo, hi; + + if (i == 0) + off = BNX2_PG_CTX_MAP; + else + off = BNX2_ISCSI_CTX_MAP; + + reg = cnic_reg_rd_ind(dev, off); + lo = reg >> 16; + hi = reg & 0xffff; + for (j = lo; j < hi; j += cp->cids_per_blk, k++) + cp->ctx_arr[k].cid = j; + } + + cp->ctx_blks = k; + if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) { + cp->ctx_blks = 0; + return -ENOMEM; + } + + for (i = 0; i < cp->ctx_blks; i++) { + cp->ctx_arr[i].ctx = + pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE, + &cp->ctx_arr[i].mapping); + if (cp->ctx_arr[i].ctx == NULL) + return -ENOMEM; + } + } + return 0; +} + +static int cnic_alloc_bnx2_resc(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct uio_info *uinfo; + int ret; + + ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1); + if (ret) + goto error; + cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr; + 
+ ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1); + if (ret) + goto error; + cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr; + + ret = cnic_alloc_context(dev); + if (ret) + goto error; + + cp->l2_ring_size = 2 * BCM_PAGE_SIZE; + cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size, + &cp->l2_ring_map); + if (!cp->l2_ring) + goto error; + + cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; + cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size); + cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size, + &cp->l2_buf_map); + if (!cp->l2_buf) + goto error; + + uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC); + if (!uinfo) + goto error; + + uinfo->mem[0].addr = dev->netdev->base_addr; + uinfo->mem[0].internal_addr = dev->regview; + uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start; + uinfo->mem[0].memtype = UIO_MEM_PHYS; + + uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK; + if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) + uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; + else + uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE; + uinfo->mem[1].memtype = UIO_MEM_LOGICAL; + + uinfo->mem[2].addr = (unsigned long) cp->l2_ring; + uinfo->mem[2].size = cp->l2_ring_size; + uinfo->mem[2].memtype = UIO_MEM_LOGICAL; + + uinfo->mem[3].addr = (unsigned long) cp->l2_buf; + uinfo->mem[3].size = cp->l2_buf_size; + uinfo->mem[3].memtype = UIO_MEM_LOGICAL; + + uinfo->name = "bnx2_cnic"; + uinfo->version = CNIC_MODULE_VERSION; + uinfo->irq = UIO_IRQ_CUSTOM; + + uinfo->open = cnic_uio_open; + uinfo->release = cnic_uio_close; + + uinfo->priv = dev; + + ret = uio_register_device(&dev->pcidev->dev, uinfo); + if (ret) { + kfree(uinfo); + goto error; + } + + cp->cnic_uinfo = uinfo; + + return 0; + +error: + cnic_free_resc(dev); + return ret; +} + +static inline u32 cnic_kwq_avail(struct cnic_local *cp) +{ + return cp->max_kwq_idx - + ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx); +} + +static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num_wqes) +{ + struct cnic_local *cp = dev->cnic_priv; + struct kwqe *prod_qe; + u16 prod, sw_prod, i; + + if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) + return -EAGAIN; /* bnx2 is down */ + + spin_lock_bh(&cp->cnic_ulp_lock); + if (num_wqes > cnic_kwq_avail(cp) && + !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) { + spin_unlock_bh(&cp->cnic_ulp_lock); + return -EAGAIN; + } + + cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT; + + prod = cp->kwq_prod_idx; + sw_prod = prod & MAX_KWQ_IDX; + for (i = 0; i < num_wqes; i++) { + prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)]; + memcpy(prod_qe, wqes[i], sizeof(struct kwqe)); + prod++; + sw_prod = prod & MAX_KWQ_IDX; + } + cp->kwq_prod_idx = prod; + + CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx); + + spin_unlock_bh(&cp->cnic_ulp_lock); + return 0; +} + +static void service_kcqes(struct cnic_dev *dev, int num_cqes) +{ + struct cnic_local *cp = dev->cnic_priv; + int i, j; + + i = 0; + j = 1; + while (num_cqes) { + struct cnic_ulp_ops *ulp_ops; + int ulp_type; + u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag; + u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK; + + if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION)) + cnic_kwq_completion(dev, 1); + + while (j < num_cqes) { + u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag; + + if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer) + break; + + if (unlikely(next_op & KCQE_RAMROD_COMPLETION)) + cnic_kwq_completion(dev, 1); + j++; + } + + if (kcqe_layer 
== KCQE_FLAGS_LAYER_MASK_L5_RDMA) + ulp_type = CNIC_ULP_RDMA; + else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI) + ulp_type = CNIC_ULP_ISCSI; + else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4) + ulp_type = CNIC_ULP_L4; + else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2) + goto end; + else { + printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n", + dev->netdev->name, kcqe_op_flag); + goto end; + } + + rcu_read_lock(); + ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); + if (likely(ulp_ops)) { + ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], + cp->completed_kcq + i, j); + } + rcu_read_unlock(); +end: + num_cqes -= j; + i += j; + j = 1; + } + return; +} + +static u16 cnic_bnx2_next_idx(u16 idx) +{ + return idx + 1; +} + +static u16 cnic_bnx2_hw_idx(u16 idx) +{ + return idx; +} + +static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod) +{ + struct cnic_local *cp = dev->cnic_priv; + u16 i, ri, last; + struct kcqe *kcqe; + int kcqe_cnt = 0, last_cnt = 0; + + i = ri = last = *sw_prod; + ri &= MAX_KCQ_IDX; + + while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) { + kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)]; + cp->completed_kcq[kcqe_cnt++] = kcqe; + i = cp->next_idx(i); + ri = i & MAX_KCQ_IDX; + if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) { + last_cnt = kcqe_cnt; + last = i; + } + } + + *sw_prod = last; + return last_cnt; +} + +static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp) +{ + u16 rx_cons = *cp->rx_cons_ptr; + u16 tx_cons = *cp->tx_cons_ptr; + + if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { + cp->tx_cons = tx_cons; + cp->rx_cons = rx_cons; + uio_event_notify(cp->cnic_uinfo); + } +} + +static int cnic_service_bnx2(void *data, void *status_blk) +{ + struct cnic_dev *dev = data; + struct status_block *sblk = status_blk; + struct cnic_local *cp = dev->cnic_priv; + u32 status_idx = sblk->status_idx; + u16 hw_prod, sw_prod; + int kcqe_cnt; + + if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) + return status_idx; + + cp->kwq_con_idx = *cp->kwq_con_idx_ptr; + + hw_prod = sblk->status_completion_producer_index; + sw_prod = cp->kcq_prod_idx; + while (sw_prod != hw_prod) { + kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod); + if (kcqe_cnt == 0) + goto done; + + service_kcqes(dev, kcqe_cnt); + + /* Tell compiler that status_blk fields can change. */ + barrier(); + if (status_idx != sblk->status_idx) { + status_idx = sblk->status_idx; + cp->kwq_con_idx = *cp->kwq_con_idx_ptr; + hw_prod = sblk->status_completion_producer_index; + } else + break; + } + +done: + CNIC_WR16(dev, cp->kcq_io_addr, sw_prod); + + cp->kcq_prod_idx = sw_prod; + + cnic_chk_bnx2_pkt_rings(cp); + return status_idx; +} + +static void cnic_service_bnx2_msix(unsigned long data) +{ + struct cnic_dev *dev = (struct cnic_dev *) data; + struct cnic_local *cp = dev->cnic_priv; + struct status_block_msix *status_blk = cp->bnx2_status_blk; + u32 status_idx = status_blk->status_idx; + u16 hw_prod, sw_prod; + int kcqe_cnt; + + cp->kwq_con_idx = status_blk->status_cmd_consumer_index; + + hw_prod = status_blk->status_completion_producer_index; + sw_prod = cp->kcq_prod_idx; + while (sw_prod != hw_prod) { + kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod); + if (kcqe_cnt == 0) + goto done; + + service_kcqes(dev, kcqe_cnt); + + /* Tell compiler that status_blk fields can change. 
*/ + barrier(); + if (status_idx != status_blk->status_idx) { + status_idx = status_blk->status_idx; + cp->kwq_con_idx = status_blk->status_cmd_consumer_index; + hw_prod = status_blk->status_completion_producer_index; + } else + break; + } + +done: + CNIC_WR16(dev, cp->kcq_io_addr, sw_prod); + cp->kcq_prod_idx = sw_prod; + + cnic_chk_bnx2_pkt_rings(cp); + + cp->last_status_idx = status_idx; + CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); +} + +static irqreturn_t cnic_irq(int irq, void *dev_instance) +{ + struct cnic_dev *dev = dev_instance; + struct cnic_local *cp = dev->cnic_priv; + u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; + + if (cp->ack_int) + cp->ack_int(dev); + + prefetch(cp->status_blk); + prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); + + if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) + tasklet_schedule(&cp->cnic_irq_task); + + return IRQ_HANDLED; +} + +static void cnic_ulp_stop(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int if_type; + + rcu_read_lock(); + for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { + struct cnic_ulp_ops *ulp_ops; + + ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); + if (!ulp_ops) + continue; + + if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type])) + ulp_ops->cnic_stop(cp->ulp_handle[if_type]); + } + rcu_read_unlock(); +} + +static void cnic_ulp_start(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int if_type; + + rcu_read_lock(); + for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { + struct cnic_ulp_ops *ulp_ops; + + ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); + if (!ulp_ops || !ulp_ops->cnic_start) + continue; + + if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type])) + ulp_ops->cnic_start(cp->ulp_handle[if_type]); + } + rcu_read_unlock(); +} + +static int cnic_ctl(void *data, struct cnic_ctl_info *info) +{ + struct cnic_dev *dev = data; + + switch (info->cmd) { + case CNIC_CTL_STOP_CMD: + cnic_hold(dev); + mutex_lock(&cnic_lock); + + cnic_ulp_stop(dev); + cnic_stop_hw(dev); + + mutex_unlock(&cnic_lock); + cnic_put(dev); + break; + case CNIC_CTL_START_CMD: + cnic_hold(dev); + mutex_lock(&cnic_lock); + + if (!cnic_start_hw(dev)) + cnic_ulp_start(dev); + + mutex_unlock(&cnic_lock); + cnic_put(dev); + break; + default: + return -EINVAL; + } + return 0; +} + +static void cnic_ulp_init(struct cnic_dev *dev) +{ + int i; + struct cnic_local *cp = dev->cnic_priv; + + rcu_read_lock(); + for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { + struct cnic_ulp_ops *ulp_ops; + + ulp_ops = rcu_dereference(cnic_ulp_tbl[i]); + if (!ulp_ops || !ulp_ops->cnic_init) + continue; + + if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i])) + ulp_ops->cnic_init(dev); + + } + rcu_read_unlock(); +} + +static void cnic_ulp_exit(struct cnic_dev *dev) +{ + int i; + struct cnic_local *cp = dev->cnic_priv; + + rcu_read_lock(); + for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { + struct cnic_ulp_ops *ulp_ops; + + ulp_ops = rcu_dereference(cnic_ulp_tbl[i]); + if (!ulp_ops || !ulp_ops->cnic_exit) + continue; + + if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i])) + ulp_ops->cnic_exit(dev); + + } + rcu_read_unlock(); +} + +static int cnic_cm_offload_pg(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_offload_pg *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->op_code = 
L4_KWQE_OPCODE_VALUE_OFFLOAD_PG; + l4kwqe->flags = + L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT; + l4kwqe->l2hdr_nbytes = ETH_HLEN; + + l4kwqe->da0 = csk->ha[0]; + l4kwqe->da1 = csk->ha[1]; + l4kwqe->da2 = csk->ha[2]; + l4kwqe->da3 = csk->ha[3]; + l4kwqe->da4 = csk->ha[4]; + l4kwqe->da5 = csk->ha[5]; + + l4kwqe->sa0 = dev->mac_addr[0]; + l4kwqe->sa1 = dev->mac_addr[1]; + l4kwqe->sa2 = dev->mac_addr[2]; + l4kwqe->sa3 = dev->mac_addr[3]; + l4kwqe->sa4 = dev->mac_addr[4]; + l4kwqe->sa5 = dev->mac_addr[5]; + + l4kwqe->etype = ETH_P_IP; + l4kwqe->ipid_count = DEF_IPID_COUNT; + l4kwqe->host_opaque = csk->l5_cid; + + if (csk->vlan_id) { + l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING; + l4kwqe->vlan_tag = csk->vlan_id; + l4kwqe->l2hdr_nbytes += 4; + } + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_update_pg(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_update_pg *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG; + l4kwqe->flags = + L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT; + l4kwqe->pg_cid = csk->pg_cid; + + l4kwqe->da0 = csk->ha[0]; + l4kwqe->da1 = csk->ha[1]; + l4kwqe->da2 = csk->ha[2]; + l4kwqe->da3 = csk->ha[3]; + l4kwqe->da4 = csk->ha[4]; + l4kwqe->da5 = csk->ha[5]; + + l4kwqe->pg_host_opaque = csk->l5_cid; + l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA; + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_upload_pg(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_upload *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG; + l4kwqe->flags = + L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT; + l4kwqe->cid = csk->pg_cid; + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_conn_req(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_connect_req1 *l4kwqe1; + struct l4_kwq_connect_req2 *l4kwqe2; + struct l4_kwq_connect_req3 *l4kwqe3; + struct kwqe *wqes[3]; + u8 tcp_flags = 0; + int num_wqes = 2; + + l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1; + l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2; + l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3; + memset(l4kwqe1, 0, sizeof(*l4kwqe1)); + memset(l4kwqe2, 0, sizeof(*l4kwqe2)); + memset(l4kwqe3, 0, sizeof(*l4kwqe3)); + + l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3; + l4kwqe3->flags = + L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT; + l4kwqe3->ka_timeout = csk->ka_timeout; + l4kwqe3->ka_interval = csk->ka_interval; + l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count; + l4kwqe3->tos = csk->tos; + l4kwqe3->ttl = csk->ttl; + l4kwqe3->snd_seq_scale = csk->snd_seq_scale; + l4kwqe3->pmtu = csk->mtu; + l4kwqe3->rcv_buf = csk->rcv_buf; + l4kwqe3->snd_buf = csk->snd_buf; + l4kwqe3->seed = csk->seed; + + wqes[0] = (struct kwqe *) l4kwqe1; + if (test_bit(SK_F_IPV6, &csk->flags)) { + wqes[1] = (struct kwqe *) l4kwqe2; + wqes[2] = (struct kwqe *) l4kwqe3; + num_wqes = 3; + + l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6; + l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2; + l4kwqe2->flags = + L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT | + L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT; + l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]); + 
l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]); + l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]); + l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]); + l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]); + l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]); + l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) - + sizeof(struct tcphdr); + } else { + wqes[1] = (struct kwqe *) l4kwqe3; + l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) - + sizeof(struct tcphdr); + } + + l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1; + l4kwqe1->flags = + (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) | + L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT; + l4kwqe1->cid = csk->cid; + l4kwqe1->pg_cid = csk->pg_cid; + l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]); + l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]); + l4kwqe1->src_port = be16_to_cpu(csk->src_port); + l4kwqe1->dst_port = be16_to_cpu(csk->dst_port); + if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK) + tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK; + if (csk->tcp_flags & SK_TCP_KEEP_ALIVE) + tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE; + if (csk->tcp_flags & SK_TCP_NAGLE) + tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE; + if (csk->tcp_flags & SK_TCP_TIMESTAMP) + tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP; + if (csk->tcp_flags & SK_TCP_SACK) + tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK; + if (csk->tcp_flags & SK_TCP_SEG_SCALING) + tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING; + + l4kwqe1->tcp_flags = tcp_flags; + + return dev->submit_kwqes(dev, wqes, num_wqes); +} + +static int cnic_cm_close_req(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_close_req *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE; + l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT; + l4kwqe->cid = csk->cid; + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_abort_req(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_reset_req *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET; + l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT; + l4kwqe->cid = csk->cid; + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid, + u32 l5_cid, struct cnic_sock **csk, void *context) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_sock *csk1; + + if (l5_cid >= MAX_CM_SK_TBL_SZ) + return -EINVAL; + + csk1 = &cp->csk_tbl[l5_cid]; + if (atomic_read(&csk1->ref_count)) + return -EAGAIN; + + if (test_and_set_bit(SK_F_INUSE, &csk1->flags)) + return -EBUSY; + + csk1->dev = dev; + csk1->cid = cid; + csk1->l5_cid = l5_cid; + csk1->ulp_type = ulp_type; + csk1->context = context; + + csk1->ka_timeout = DEF_KA_TIMEOUT; + csk1->ka_interval = DEF_KA_INTERVAL; + csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT; + csk1->tos = DEF_TOS; + csk1->ttl = DEF_TTL; + csk1->snd_seq_scale = DEF_SND_SEQ_SCALE; + csk1->rcv_buf = DEF_RCV_BUF; + csk1->snd_buf = DEF_SND_BUF; + csk1->seed = DEF_SEED; + + *csk = csk1; + return 0; +} + +static void cnic_cm_cleanup(struct cnic_sock *csk) +{ + if (csk->src_port) { + struct cnic_dev *dev = csk->dev; + struct cnic_local *cp = dev->cnic_priv; + + cnic_free_id(&cp->csk_port_tbl, csk->src_port); + 
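+ /* The local port has been returned to csk_port_tbl; clearing
+ * src_port below makes a second cleanup call a no-op. */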
csk->src_port = 0; + } +} + +static void cnic_close_conn(struct cnic_sock *csk) +{ + if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) { + cnic_cm_upload_pg(csk); + clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); + } + cnic_cm_cleanup(csk); +} + +static int cnic_cm_destroy(struct cnic_sock *csk) +{ + if (!cnic_in_use(csk)) + return -EINVAL; + + csk_hold(csk); + clear_bit(SK_F_INUSE, &csk->flags); + smp_mb__after_clear_bit(); + while (atomic_read(&csk->ref_count) != 1) + msleep(1); + cnic_cm_cleanup(csk); + + csk->flags = 0; + csk_put(csk); + return 0; +} + +static inline u16 cnic_get_vlan(struct net_device *dev, + struct net_device **vlan_dev) +{ + if (dev->priv_flags & IFF_802_1Q_VLAN) { + *vlan_dev = vlan_dev_real_dev(dev); + return vlan_dev_vlan_id(dev); + } + *vlan_dev = dev; + return 0; +} + +static int cnic_get_v4_route(struct sockaddr_in *dst_addr, + struct dst_entry **dst) +{ + struct flowi fl; + int err; + struct rtable *rt; + + memset(&fl, 0, sizeof(fl)); + fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr; + + err = ip_route_output_key(&init_net, &rt, &fl); + if (!err) + *dst = &rt->u.dst; + return err; +} + +static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, + struct dst_entry **dst) +{ +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + struct flowi fl; + + memset(&fl, 0, sizeof(fl)); + ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr); + if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL) + fl.oif = dst_addr->sin6_scope_id; + + *dst = ip6_route_output(&init_net, NULL, &fl); + if (*dst) + return 0; +#endif + + return -ENETUNREACH; +} + +static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr, + int ulp_type) +{ + struct cnic_dev *dev = NULL; + struct dst_entry *dst; + struct net_device *netdev = NULL; + int err = -ENETUNREACH; + + if (dst_addr->sin_family == AF_INET) + err = cnic_get_v4_route(dst_addr, &dst); + else if (dst_addr->sin_family == AF_INET6) { + struct sockaddr_in6 *dst_addr6 = + (struct sockaddr_in6 *) dst_addr; + + err = cnic_get_v6_route(dst_addr6, &dst); + } else + return NULL; + + if (err) + return NULL; + + if (!dst->dev) + goto done; + + cnic_get_vlan(dst->dev, &netdev); + + dev = cnic_from_netdev(netdev); + +done: + dst_release(dst); + if (dev) + cnic_put(dev); + return dev; +} + +static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr) +{ + struct cnic_dev *dev = csk->dev; + struct cnic_local *cp = dev->cnic_priv; + + return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk); +} + +static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr) +{ + struct cnic_dev *dev = csk->dev; + struct cnic_local *cp = dev->cnic_priv; + int is_v6, err, rc = -ENETUNREACH; + struct dst_entry *dst; + struct net_device *realdev; + u32 local_port; + + if (saddr->local.v6.sin6_family == AF_INET6 && + saddr->remote.v6.sin6_family == AF_INET6) + is_v6 = 1; + else if (saddr->local.v4.sin_family == AF_INET && + saddr->remote.v4.sin_family == AF_INET) + is_v6 = 0; + else + return -EINVAL; + + clear_bit(SK_F_IPV6, &csk->flags); + + if (is_v6) { +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + set_bit(SK_F_IPV6, &csk->flags); + err = cnic_get_v6_route(&saddr->remote.v6, &dst); + if (err) + return err; + + if (!dst || dst->error || !dst->dev) + goto err_out; + + memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr, + sizeof(struct in6_addr)); + csk->dst_port = saddr->remote.v6.sin6_port; + local_port = saddr->local.v6.sin6_port; +#else + return rc; +#endif + + } else { + err = 
cnic_get_v4_route(&saddr->remote.v4, &dst); + if (err) + return err; + + if (!dst || dst->error || !dst->dev) + goto err_out; + + csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr; + csk->dst_port = saddr->remote.v4.sin_port; + local_port = saddr->local.v4.sin_port; + } + + csk->vlan_id = cnic_get_vlan(dst->dev, &realdev); + if (realdev != dev->netdev) + goto err_out; + + if (local_port >= CNIC_LOCAL_PORT_MIN && + local_port < CNIC_LOCAL_PORT_MAX) { + if (cnic_alloc_id(&cp->csk_port_tbl, local_port)) + local_port = 0; + } else + local_port = 0; + + if (!local_port) { + local_port = cnic_alloc_new_id(&cp->csk_port_tbl); + if (local_port == -1) { + rc = -ENOMEM; + goto err_out; + } + } + csk->src_port = local_port; + + csk->mtu = dst_mtu(dst); + rc = 0; + +err_out: + dst_release(dst); + return rc; +} + +static void cnic_init_csk_state(struct cnic_sock *csk) +{ + csk->state = 0; + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + clear_bit(SK_F_CLOSING, &csk->flags); +} + +static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr) +{ + int err = 0; + + if (!cnic_in_use(csk)) + return -EINVAL; + + if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags)) + return -EINVAL; + + cnic_init_csk_state(csk); + + err = cnic_get_route(csk, saddr); + if (err) + goto err_out; + + err = cnic_resolve_addr(csk, saddr); + if (!err) + return 0; + +err_out: + clear_bit(SK_F_CONNECT_START, &csk->flags); + return err; +} + +static int cnic_cm_abort(struct cnic_sock *csk) +{ + struct cnic_local *cp = csk->dev->cnic_priv; + u32 opcode; + + if (!cnic_in_use(csk)) + return -EINVAL; + + if (cnic_abort_prep(csk)) + return cnic_cm_abort_req(csk); + + /* Getting here means that we haven't started connect, or + * connect was not successful. + */ + + csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP; + if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) + opcode = csk->state; + else + opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; + cp->close_conn(csk, opcode); + + return 0; +} + +static int cnic_cm_close(struct cnic_sock *csk) +{ + if (!cnic_in_use(csk)) + return -EINVAL; + + if (cnic_close_prep(csk)) { + csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; + return cnic_cm_close_req(csk); + } + return 0; +} + +static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk, + u8 opcode) +{ + struct cnic_ulp_ops *ulp_ops; + int ulp_type = csk->ulp_type; + + rcu_read_lock(); + ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); + if (ulp_ops) { + if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE) + ulp_ops->cm_connect_complete(csk); + else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) + ulp_ops->cm_close_complete(csk); + else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) + ulp_ops->cm_remote_abort(csk); + else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) + ulp_ops->cm_abort_complete(csk); + else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED) + ulp_ops->cm_remote_close(csk); + } + rcu_read_unlock(); +} + +static int cnic_cm_set_pg(struct cnic_sock *csk) +{ + if (cnic_offld_prep(csk)) { + if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) + cnic_cm_update_pg(csk); + else + cnic_cm_offload_pg(csk); + } + return 0; +} + +static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 l5_cid = kcqe->pg_host_opaque; + u8 opcode = kcqe->op_code; + struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; + + csk_hold(csk); + if (!cnic_in_use(csk)) + goto done; + + if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + 
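+ /* An UPDATE_PG completion only acknowledges the updated
+ * destination MAC; no connect request follows it. */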
goto done; + } + csk->pg_cid = kcqe->pg_cid; + set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); + cnic_cm_conn_req(csk); + +done: + csk_put(csk); +} + +static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) +{ + struct cnic_local *cp = dev->cnic_priv; + struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe; + u8 opcode = l4kcqe->op_code; + u32 l5_cid; + struct cnic_sock *csk; + + if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG || + opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { + cnic_cm_process_offld_pg(dev, l4kcqe); + return; + } + + l5_cid = l4kcqe->conn_id; + if (opcode & 0x80) + l5_cid = l4kcqe->cid; + if (l5_cid >= MAX_CM_SK_TBL_SZ) + return; + + csk = &cp->csk_tbl[l5_cid]; + csk_hold(csk); + + if (!cnic_in_use(csk)) { + csk_put(csk); + return; + } + + switch (opcode) { + case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE: + if (l4kcqe->status == 0) + set_bit(SK_F_OFFLD_COMPLETE, &csk->flags); + + smp_mb__before_clear_bit(); + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + cnic_cm_upcall(cp, csk, opcode); + break; + + case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: + if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) + csk->state = opcode; + /* fall through */ + case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: + case L4_KCQE_OPCODE_VALUE_RESET_COMP: + cp->close_conn(csk, opcode); + break; + + case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED: + cnic_cm_upcall(cp, csk, opcode); + break; + } + csk_put(csk); +} + +static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num) +{ + struct cnic_dev *dev = data; + int i; + + for (i = 0; i < num; i++) + cnic_cm_process_kcqe(dev, kcqe[i]); +} + +static struct cnic_ulp_ops cm_ulp_ops = { + .indicate_kcqes = cnic_cm_indicate_kcqe, +}; + +static void cnic_cm_free_mem(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + kfree(cp->csk_tbl); + cp->csk_tbl = NULL; + cnic_free_id_tbl(&cp->csk_port_tbl); +} + +static int cnic_cm_alloc_mem(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ, + GFP_KERNEL); + if (!cp->csk_tbl) + return -ENOMEM; + + if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE, + CNIC_LOCAL_PORT_MIN)) { + cnic_cm_free_mem(dev); + return -ENOMEM; + } + return 0; +} + +static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode) +{ + if ((opcode == csk->state) || + (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED && + csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) { + if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) + return 1; + } + return 0; +} + +static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode) +{ + struct cnic_dev *dev = csk->dev; + struct cnic_local *cp = dev->cnic_priv; + + clear_bit(SK_F_CONNECT_START, &csk->flags); + if (cnic_ready_to_close(csk, opcode)) { + cnic_close_conn(csk); + cnic_cm_upcall(cp, csk, opcode); + } +} + +static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev) +{ +} + +static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev) +{ + u32 seed; + + get_random_bytes(&seed, 4); + cnic_ctx_wr(dev, 45, 0, seed); + return 0; +} + +static int cnic_cm_open(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int err; + + err = cnic_cm_alloc_mem(dev); + if (err) + return err; + + err = cp->start_cm(dev); + + if (err) + goto err_out; + + dev->cm_create = cnic_cm_create; + dev->cm_destroy = cnic_cm_destroy; + dev->cm_connect = cnic_cm_connect; + dev->cm_abort = cnic_cm_abort; + dev->cm_close = cnic_cm_close; + dev->cm_select_dev = cnic_cm_select_dev; + + cp->ulp_handle[CNIC_ULP_L4] = 
dev; + rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops); + return 0; + +err_out: + cnic_cm_free_mem(dev); + return err; +} + +static int cnic_cm_shutdown(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int i; + + cp->stop_cm(dev); + + if (!cp->csk_tbl) + return 0; + + for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) { + struct cnic_sock *csk = &cp->csk_tbl[i]; + + clear_bit(SK_F_INUSE, &csk->flags); + cnic_cm_cleanup(csk); + } + cnic_cm_free_mem(dev); + + return 0; +} + +static void cnic_init_context(struct cnic_dev *dev, u32 cid) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 cid_addr; + int i; + + if (CHIP_NUM(cp) == CHIP_NUM_5709) + return; + + cid_addr = GET_CID_ADDR(cid); + + for (i = 0; i < CTX_SIZE; i += 4) + cnic_ctx_wr(dev, cid_addr, i, 0); +} + +static int cnic_setup_5709_context(struct cnic_dev *dev, int valid) +{ + struct cnic_local *cp = dev->cnic_priv; + int ret = 0, i; + u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0; + + if (CHIP_NUM(cp) != CHIP_NUM_5709) + return 0; + + for (i = 0; i < cp->ctx_blks; i++) { + int j; + u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; + u32 val; + + memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE); + + CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, + (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); + CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1, + (u64) cp->ctx_arr[i].mapping >> 32); + CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx | + BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); + for (j = 0; j < 10; j++) { + + val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL); + if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) + break; + udelay(5); + } + if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { + ret = -EBUSY; + break; + } + } + return ret; +} + +static void cnic_free_irq(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + cp->disable_int_sync(dev); + tasklet_disable(&cp->cnic_irq_task); + free_irq(ethdev->irq_arr[0].vector, dev); + } +} + +static int cnic_init_bnx2_irq(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + int err, i = 0; + int sblk_num = cp->status_blk_num; + u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) + + BNX2_HC_SB_CONFIG_1; + + CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT); + + CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8); + CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220); + CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220); + + cp->bnx2_status_blk = cp->status_blk; + cp->last_status_idx = cp->bnx2_status_blk->status_idx; + tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix, + (unsigned long) dev); + err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, + "cnic", dev); + if (err) { + tasklet_disable(&cp->cnic_irq_task); + return err; + } + while (cp->bnx2_status_blk->status_completion_producer_index && + i < 10) { + CNIC_WR(dev, BNX2_HC_COALESCE_NOW, + 1 << (11 + sblk_num)); + udelay(10); + i++; + barrier(); + } + if (cp->bnx2_status_blk->status_completion_producer_index) { + cnic_free_irq(dev); + goto failed; + } + + } else { + struct status_block *sblk = cp->status_blk; + u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND); + int i = 0; + + while (sblk->status_completion_producer_index && i < 10) { + CNIC_WR(dev, BNX2_HC_COMMAND, + hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + udelay(10); + i++; + barrier(); + } + 
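+ /* If the completion producer index is still non-zero after the
+ * forced coalescing above, the KCQ never reset; bail out. */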
if (sblk->status_completion_producer_index) + goto failed; + + } + return 0; + +failed: + printk(KERN_ERR PFX "%s: " "KCQ index not resetting to 0.\n", + dev->netdev->name); + return -EBUSY; +} + +static void cnic_enable_bnx2_int(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) + return; + + CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); +} + +static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) + return; + + CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | + BNX2_PCICFG_INT_ACK_CMD_MASK_INT); + CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD); + synchronize_irq(ethdev->irq_arr[0].vector); +} + +static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + u32 cid_addr, tx_cid, sb_id; + u32 val, offset0, offset1, offset2, offset3; + int i; + struct tx_bd *txbd; + dma_addr_t buf_map; + struct status_block *s_blk = cp->status_blk; + + sb_id = cp->status_blk_num; + tx_cid = 20; + cnic_init_context(dev, tx_cid); + cnic_init_context(dev, tx_cid + 1); + cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + struct status_block_msix *sblk = cp->status_blk; + + tx_cid = TX_TSS_CID + sb_id - 1; + cnic_init_context(dev, tx_cid); + CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | + (TX_TSS_CID << 7)); + cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; + } + cp->tx_cons = *cp->tx_cons_ptr; + + cid_addr = GET_CID_ADDR(tx_cid); + if (CHIP_NUM(cp) == CHIP_NUM_5709) { + u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40; + + for (i = 0; i < PHY_CTX_SIZE; i += 4) + cnic_ctx_wr(dev, cid_addr2, i, 0); + + offset0 = BNX2_L2CTX_TYPE_XI; + offset1 = BNX2_L2CTX_CMD_TYPE_XI; + offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; + offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; + } else { + offset0 = BNX2_L2CTX_TYPE; + offset1 = BNX2_L2CTX_CMD_TYPE; + offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; + offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; + } + val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; + cnic_ctx_wr(dev, cid_addr, offset0, val); + + val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); + cnic_ctx_wr(dev, cid_addr, offset1, val); + + txbd = (struct tx_bd *) cp->l2_ring; + + buf_map = cp->l2_buf_map; + for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) { + txbd->tx_bd_haddr_hi = (u64) buf_map >> 32; + txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff; + } + val = (u64) cp->l2_ring_map >> 32; + cnic_ctx_wr(dev, cid_addr, offset2, val); + txbd->tx_bd_haddr_hi = val; + + val = (u64) cp->l2_ring_map & 0xffffffff; + cnic_ctx_wr(dev, cid_addr, offset3, val); + txbd->tx_bd_haddr_lo = val; +} + +static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + u32 cid_addr, sb_id, val, coal_reg, coal_val; + int i; + struct rx_bd *rxbd; + struct status_block *s_blk = cp->status_blk; + + sb_id = cp->status_blk_num; + cnic_init_context(dev, 2); + cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2; + coal_reg = BNX2_HC_COMMAND; + coal_val = CNIC_RD(dev, coal_reg); + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + struct status_block_msix *sblk = cp->status_blk; + + cp->rx_cons_ptr = 
&sblk->status_rx_quick_consumer_index; + coal_reg = BNX2_HC_COALESCE_NOW; + coal_val = 1 << (11 + sb_id); + } + i = 0; + while (!(*cp->rx_cons_ptr != 0) && i < 10) { + CNIC_WR(dev, coal_reg, coal_val); + udelay(10); + i++; + barrier(); + } + cp->rx_cons = *cp->rx_cons_ptr; + + cid_addr = GET_CID_ADDR(2); + val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | + BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); + cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val); + + if (sb_id == 0) + val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT; + else + val = BNX2_L2CTX_STATUSB_NUM(sb_id); + cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); + + rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE); + for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) { + dma_addr_t buf_map; + int n = (i % cp->l2_rx_ring_size) + 1; + + buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size); + rxbd->rx_bd_len = cp->l2_single_buf_size; + rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; + rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; + rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; + } + val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32; + cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); + rxbd->rx_bd_haddr_hi = val; + + val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff; + cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); + rxbd->rx_bd_haddr_lo = val; + + val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD); + cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2)); +} + +static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev) +{ + struct kwqe *wqes[1], l2kwqe; + + memset(&l2kwqe, 0, sizeof(l2kwqe)); + wqes[0] = &l2kwqe; + l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) | + (L2_KWQE_OPCODE_VALUE_FLUSH << + KWQE_OPCODE_SHIFT) | 2; + dev->submit_kwqes(dev, wqes, 1); +} + +static void cnic_set_bnx2_mac(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 val; + + val = cp->func << 2; + + cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val); + + val = cnic_reg_rd_ind(dev, cp->shmem_base + + BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER); + dev->mac_addr[0] = (u8) (val >> 8); + dev->mac_addr[1] = (u8) val; + + CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val); + + val = cnic_reg_rd_ind(dev, cp->shmem_base + + BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER); + dev->mac_addr[2] = (u8) (val >> 24); + dev->mac_addr[3] = (u8) (val >> 16); + dev->mac_addr[4] = (u8) (val >> 8); + dev->mac_addr[5] = (u8) val; + + CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val); + + val = 4 | BNX2_RPM_SORT_USER2_BC_EN; + if (CHIP_NUM(cp) != CHIP_NUM_5709) + val |= BNX2_RPM_SORT_USER2_PROM_VLAN; + + CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0); + CNIC_WR(dev, BNX2_RPM_SORT_USER2, val); + CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA); +} + +static int cnic_start_bnx2_hw(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct status_block *sblk = cp->status_blk; + u32 val; + int err; + + cnic_set_bnx2_mac(dev); + + val = CNIC_RD(dev, BNX2_MQ_CONFIG); + val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; + if (BCM_PAGE_BITS > 12) + val |= (12 - 8) << 4; + else + val |= (BCM_PAGE_BITS - 8) << 4; + + CNIC_WR(dev, BNX2_MQ_CONFIG, val); + + CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8); + CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220); + CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220); + + err = cnic_setup_5709_context(dev, 1); + if (err) + return err; + + cnic_init_context(dev, KWQ_CID); + cnic_init_context(dev, KCQ_CID); + + 
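+ /* Set up the kernel work queue (KWQ): software indices first, then
+ * the chip-side queue context (type and page size, element count,
+ * page table address). The KCQ is initialized the same way below. */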
cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
+ cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+ cp->max_kwq_idx = MAX_KWQ_IDX;
+ cp->kwq_prod_idx = 0;
+ cp->kwq_con_idx = 0;
+ cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;
+
+ if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
+ cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
+ else
+ cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
+
+ /* Initialize the kernel work queue context. */
+ val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+ (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+ cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);
+
+ val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+ cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+ val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+ cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+ val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
+ cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+ val = (u32) cp->kwq_info.pgtbl_map;
+ cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+ cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
+ cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+ cp->kcq_prod_idx = 0;
+
+ /* Initialize the kernel complete queue context. */
+ val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+ (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+ cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);
+
+ val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+ cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+ val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+ cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+ val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
+ cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+ val = (u32) cp->kcq_info.pgtbl_map;
+ cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+ cp->int_num = 0;
+ if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+ u32 sb_id = cp->status_blk_num;
+ u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id);
+
+ cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
+ cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+ cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+ }
+
+ /* Enable Command Scheduler notification when we write to the
+ * host producer index of the kernel contexts. */
+ CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
+
+ /* Enable Command Scheduler notification when we write to either
+ * the Send Queue or Receive Queue producer indexes of the kernel
+ * bypass contexts. */
+ CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
+ CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
+
+ /* Notify COM when the driver posts an application buffer. */
+ CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
+
+ /* Set the CP and COM doorbells. These two processors poll the
+ * doorbell for a non-zero value before running. This must be done
+ * after setting up the kernel queue contexts.
*/ + cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1); + cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1); + + cnic_init_bnx2_tx_ring(dev); + cnic_init_bnx2_rx_ring(dev); + + err = cnic_init_bnx2_irq(dev); + if (err) { + printk(KERN_ERR PFX "%s: cnic_init_irq failed\n", + dev->netdev->name); + cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); + cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); + return err; + } + + return 0; +} + +static int cnic_start_hw(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + int err; + + if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) + return -EALREADY; + + err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev); + if (err) { + printk(KERN_ERR PFX "%s: register_cnic failed\n", + dev->netdev->name); + goto err2; + } + + dev->regview = ethdev->io_base; + cp->chip_id = ethdev->chip_id; + pci_dev_get(dev->pcidev); + cp->func = PCI_FUNC(dev->pcidev->devfn); + cp->status_blk = ethdev->irq_arr[0].status_blk; + cp->status_blk_num = ethdev->irq_arr[0].status_blk_num; + + err = cp->alloc_resc(dev); + if (err) { + printk(KERN_ERR PFX "%s: allocate resource failure\n", + dev->netdev->name); + goto err1; + } + + err = cp->start_hw(dev); + if (err) + goto err1; + + err = cnic_cm_open(dev); + if (err) + goto err1; + + set_bit(CNIC_F_CNIC_UP, &dev->flags); + + cp->enable_int(dev); + + return 0; + +err1: + ethdev->drv_unregister_cnic(dev->netdev); + cp->free_resc(dev); + pci_dev_put(dev->pcidev); +err2: + return err; +} + +static void cnic_stop_bnx2_hw(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + cnic_disable_bnx2_int_sync(dev); + + cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); + cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); + + cnic_init_context(dev, KWQ_CID); + cnic_init_context(dev, KCQ_CID); + + cnic_setup_5709_context(dev, 0); + cnic_free_irq(dev); + + ethdev->drv_unregister_cnic(dev->netdev); + + cnic_free_resc(dev); +} + +static void cnic_stop_hw(struct cnic_dev *dev) +{ + if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { + struct cnic_local *cp = dev->cnic_priv; + + clear_bit(CNIC_F_CNIC_UP, &dev->flags); + rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL); + synchronize_rcu(); + cnic_cm_shutdown(dev); + cp->stop_hw(dev); + pci_dev_put(dev->pcidev); + } +} + +static void cnic_free_dev(struct cnic_dev *dev) +{ + int i = 0; + + while ((atomic_read(&dev->ref_count) != 0) && i < 10) { + msleep(100); + i++; + } + if (atomic_read(&dev->ref_count) != 0) + printk(KERN_ERR PFX "%s: Failed waiting for ref count to go" + " to zero.\n", dev->netdev->name); + + printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name); + dev_put(dev->netdev); + kfree(dev); +} + +static struct cnic_dev *cnic_alloc_dev(struct net_device *dev, + struct pci_dev *pdev) +{ + struct cnic_dev *cdev; + struct cnic_local *cp; + int alloc_size; + + alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local); + + cdev = kzalloc(alloc_size , GFP_KERNEL); + if (cdev == NULL) { + printk(KERN_ERR PFX "%s: allocate dev struct failure\n", + dev->name); + return NULL; + } + + cdev->netdev = dev; + cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev); + cdev->register_device = cnic_register_device; + cdev->unregister_device = cnic_unregister_device; + cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv; + + cp = cdev->cnic_priv; + cp->dev = cdev; + cp->uio_dev = -1; + cp->l2_single_buf_size = 0x400; + cp->l2_rx_ring_size = 3; + + spin_lock_init(&cp->cnic_ulp_lock); 
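+ /* Remaining fields stay zero from kzalloc(); the cnic_local area
+ * is carved out of the same allocation, right after struct cnic_dev. */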
+ + printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name); + + return cdev; +} + +static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) +{ + struct pci_dev *pdev; + struct cnic_dev *cdev; + struct cnic_local *cp; + struct cnic_eth_dev *ethdev = NULL; + struct cnic_eth_dev *(*probe)(void *) = NULL; + + probe = __symbol_get("bnx2_cnic_probe"); + if (probe) { + ethdev = (*probe)(dev); + symbol_put_addr(probe); + } + if (!ethdev) + return NULL; + + pdev = ethdev->pdev; + if (!pdev) + return NULL; + + dev_hold(dev); + pci_dev_get(pdev); + if (pdev->device == PCI_DEVICE_ID_NX2_5709 || + pdev->device == PCI_DEVICE_ID_NX2_5709S) { + u8 rev; + + pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); + if (rev < 0x10) { + pci_dev_put(pdev); + goto cnic_err; + } + } + pci_dev_put(pdev); + + cdev = cnic_alloc_dev(dev, pdev); + if (cdev == NULL) + goto cnic_err; + + set_bit(CNIC_F_BNX2_CLASS, &cdev->flags); + cdev->submit_kwqes = cnic_submit_bnx2_kwqes; + + cp = cdev->cnic_priv; + cp->ethdev = ethdev; + cdev->pcidev = pdev; + + cp->cnic_ops = &cnic_bnx2_ops; + cp->start_hw = cnic_start_bnx2_hw; + cp->stop_hw = cnic_stop_bnx2_hw; + cp->setup_pgtbl = cnic_setup_page_tbl; + cp->alloc_resc = cnic_alloc_bnx2_resc; + cp->free_resc = cnic_free_resc; + cp->start_cm = cnic_cm_init_bnx2_hw; + cp->stop_cm = cnic_cm_stop_bnx2_hw; + cp->enable_int = cnic_enable_bnx2_int; + cp->disable_int_sync = cnic_disable_bnx2_int_sync; + cp->close_conn = cnic_close_bnx2_conn; + cp->next_idx = cnic_bnx2_next_idx; + cp->hw_idx = cnic_bnx2_hw_idx; + return cdev; + +cnic_err: + dev_put(dev); + return NULL; +} + +static struct cnic_dev *is_cnic_dev(struct net_device *dev) +{ + struct ethtool_drvinfo drvinfo; + struct cnic_dev *cdev = NULL; + + if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) { + memset(&drvinfo, 0, sizeof(drvinfo)); + dev->ethtool_ops->get_drvinfo(dev, &drvinfo); + + if (!strcmp(drvinfo.driver, "bnx2")) + cdev = init_bnx2_cnic(dev); + if (cdev) { + write_lock(&cnic_dev_lock); + list_add(&cdev->list, &cnic_dev_list); + write_unlock(&cnic_dev_lock); + } + } + return cdev; +} + +/** + * netdev event handler + */ +static int cnic_netdev_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct net_device *netdev = ptr; + struct cnic_dev *dev; + int if_type; + int new_dev = 0; + + dev = cnic_from_netdev(netdev); + + if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) { + /* Check for the hot-plug device */ + dev = is_cnic_dev(netdev); + if (dev) { + new_dev = 1; + cnic_hold(dev); + } + } + if (dev) { + struct cnic_local *cp = dev->cnic_priv; + + if (new_dev) + cnic_ulp_init(dev); + else if (event == NETDEV_UNREGISTER) + cnic_ulp_exit(dev); + else if (event == NETDEV_UP) { + mutex_lock(&cnic_lock); + if (!cnic_start_hw(dev)) + cnic_ulp_start(dev); + mutex_unlock(&cnic_lock); + } + + rcu_read_lock(); + for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { + struct cnic_ulp_ops *ulp_ops; + void *ctx; + + ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); + if (!ulp_ops || !ulp_ops->indicate_netevent) + continue; + + ctx = cp->ulp_handle[if_type]; + + ulp_ops->indicate_netevent(ctx, event); + } + rcu_read_unlock(); + + if (event == NETDEV_GOING_DOWN) { + mutex_lock(&cnic_lock); + cnic_ulp_stop(dev); + cnic_stop_hw(dev); + mutex_unlock(&cnic_lock); + } else if (event == NETDEV_UNREGISTER) { + write_lock(&cnic_dev_lock); + list_del_init(&dev->list); + write_unlock(&cnic_dev_lock); + + cnic_put(dev); + cnic_free_dev(dev); + goto done; + } + cnic_put(dev); + } +done: + return 
NOTIFY_DONE; +} + +static struct notifier_block cnic_netdev_notifier = { + .notifier_call = cnic_netdev_event +}; + +static void cnic_release(void) +{ + struct cnic_dev *dev; + + while (!list_empty(&cnic_dev_list)) { + dev = list_entry(cnic_dev_list.next, struct cnic_dev, list); + if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { + cnic_ulp_stop(dev); + cnic_stop_hw(dev); + } + + cnic_ulp_exit(dev); + list_del_init(&dev->list); + cnic_free_dev(dev); + } +} + +static int __init cnic_init(void) +{ + int rc = 0; + + printk(KERN_INFO "%s", version); + + rc = register_netdevice_notifier(&cnic_netdev_notifier); + if (rc) { + cnic_release(); + return rc; + } + + return 0; +} + +static void __exit cnic_exit(void) +{ + unregister_netdevice_notifier(&cnic_netdev_notifier); + cnic_release(); + return; +} + +module_init(cnic_init); +module_exit(cnic_exit); diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h new file mode 100644 index 00000000000..5192d4a9df5 --- /dev/null +++ b/drivers/net/cnic.h @@ -0,0 +1,299 @@ +/* cnic.h: Broadcom CNIC core network driver. + * + * Copyright (c) 2006-2009 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + */ + + +#ifndef CNIC_H +#define CNIC_H + +#define KWQ_PAGE_CNT 4 +#define KCQ_PAGE_CNT 16 + +#define KWQ_CID 24 +#define KCQ_CID 25 + +/* + * krnlq_context definition + */ +#define L5_KRNLQ_FLAGS 0x00000000 +#define L5_KRNLQ_SIZE 0x00000000 +#define L5_KRNLQ_TYPE 0x00000000 +#define KRNLQ_FLAGS_PG_SZ (0xf<<0) +#define KRNLQ_FLAGS_PG_SZ_256 (0<<0) +#define KRNLQ_FLAGS_PG_SZ_512 (1<<0) +#define KRNLQ_FLAGS_PG_SZ_1K (2<<0) +#define KRNLQ_FLAGS_PG_SZ_2K (3<<0) +#define KRNLQ_FLAGS_PG_SZ_4K (4<<0) +#define KRNLQ_FLAGS_PG_SZ_8K (5<<0) +#define KRNLQ_FLAGS_PG_SZ_16K (6<<0) +#define KRNLQ_FLAGS_PG_SZ_32K (7<<0) +#define KRNLQ_FLAGS_PG_SZ_64K (8<<0) +#define KRNLQ_FLAGS_PG_SZ_128K (9<<0) +#define KRNLQ_FLAGS_PG_SZ_256K (10<<0) +#define KRNLQ_FLAGS_PG_SZ_512K (11<<0) +#define KRNLQ_FLAGS_PG_SZ_1M (12<<0) +#define KRNLQ_FLAGS_PG_SZ_2M (13<<0) +#define KRNLQ_FLAGS_QE_SELF_SEQ (1<<15) +#define KRNLQ_SIZE_TYPE_SIZE ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16) +#define KRNLQ_TYPE_TYPE (0xf<<28) +#define KRNLQ_TYPE_TYPE_EMPTY (0<<28) +#define KRNLQ_TYPE_TYPE_KRNLQ (6<<28) + +#define L5_KRNLQ_HOST_QIDX 0x00000004 +#define L5_KRNLQ_HOST_FW_QIDX 0x00000008 +#define L5_KRNLQ_NX_QE_SELF_SEQ 0x0000000c +#define L5_KRNLQ_QE_SELF_SEQ_MAX 0x0000000c +#define L5_KRNLQ_NX_QE_HADDR_HI 0x00000010 +#define L5_KRNLQ_NX_QE_HADDR_LO 0x00000014 +#define L5_KRNLQ_PGTBL_PGIDX 0x00000018 +#define L5_KRNLQ_NX_PG_QIDX 0x00000018 +#define L5_KRNLQ_PGTBL_NPAGES 0x0000001c +#define L5_KRNLQ_QIDX_INCR 0x0000001c +#define L5_KRNLQ_PGTBL_HADDR_HI 0x00000020 +#define L5_KRNLQ_PGTBL_HADDR_LO 0x00000024 + +#define BNX2_PG_CTX_MAP 0x1a0034 +#define BNX2_ISCSI_CTX_MAP 0x1a0074 + +struct cnic_redirect_entry { + struct dst_entry *old_dst; + struct dst_entry *new_dst; +}; + +#define MAX_COMPLETED_KCQE 64 + +#define MAX_CNIC_L5_CONTEXT 256 + +#define MAX_CM_SK_TBL_SZ MAX_CNIC_L5_CONTEXT + +#define MAX_ISCSI_TBL_SZ 256 + +#define CNIC_LOCAL_PORT_MIN 60000 +#define CNIC_LOCAL_PORT_MAX 61000 +#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN) + +#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe)) +#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe)) +#define MAX_KWQE_CNT (KWQE_CNT - 1) +#define MAX_KCQE_CNT (KCQE_CNT - 1) + +#define 
MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1) +#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1) + +#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5)) +#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT) + +#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5)) +#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT) + +#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \ + (MAX_KCQE_CNT - 1)) ? \ + (x) + 2 : (x) + 1 + +#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp) +#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp) +#define BNX2X_KWQ_DATA(cp, x) \ + &(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)] + +#define DEF_IPID_COUNT 0xc001 + +#define DEF_KA_TIMEOUT 10000 +#define DEF_KA_INTERVAL 300000 +#define DEF_KA_MAX_PROBE_COUNT 3 +#define DEF_TOS 0 +#define DEF_TTL 0xfe +#define DEF_SND_SEQ_SCALE 0 +#define DEF_RCV_BUF 0xffff +#define DEF_SND_BUF 0xffff +#define DEF_SEED 0 +#define DEF_MAX_RT_TIME 500 +#define DEF_MAX_DA_COUNT 2 +#define DEF_SWS_TIMER 1000 +#define DEF_MAX_CWND 0xffff + +struct cnic_ctx { + u32 cid; + void *ctx; + dma_addr_t mapping; +}; + +#define BNX2_MAX_CID 0x2000 + +struct cnic_dma { + int num_pages; + void **pg_arr; + dma_addr_t *pg_map_arr; + int pgtbl_size; + u32 *pgtbl; + dma_addr_t pgtbl_map; +}; + +struct cnic_id_tbl { + spinlock_t lock; + u32 start; + u32 max; + u32 next; + unsigned long *table; +}; + +#define CNIC_KWQ16_DATA_SIZE 128 + +struct kwqe_16_data { + u8 data[CNIC_KWQ16_DATA_SIZE]; +}; + +struct cnic_iscsi { + struct cnic_dma task_array_info; + struct cnic_dma r2tq_info; + struct cnic_dma hq_info; +}; + +struct cnic_context { + u32 cid; + struct kwqe_16_data *kwqe_data; + dma_addr_t kwqe_data_mapping; + wait_queue_head_t waitq; + int wait_cond; + unsigned long timestamp; + u32 ctx_flags; +#define CTX_FL_OFFLD_START 0x00000001 + u8 ulp_proto_id; + union { + struct cnic_iscsi *iscsi; + } proto; +}; + +struct cnic_local { + + spinlock_t cnic_ulp_lock; + void *ulp_handle[MAX_CNIC_ULP_TYPE]; + unsigned long ulp_flags[MAX_CNIC_ULP_TYPE]; +#define ULP_F_INIT 0 +#define ULP_F_START 1 + struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE]; + + /* protected by ulp_lock */ + u32 cnic_local_flags; +#define CNIC_LCL_FL_KWQ_INIT 0x00000001 + + struct cnic_dev *dev; + + struct cnic_eth_dev *ethdev; + + void *l2_ring; + dma_addr_t l2_ring_map; + int l2_ring_size; + int l2_rx_ring_size; + + void *l2_buf; + dma_addr_t l2_buf_map; + int l2_buf_size; + int l2_single_buf_size; + + u16 *rx_cons_ptr; + u16 *tx_cons_ptr; + u16 rx_cons; + u16 tx_cons; + + u32 kwq_cid_addr; + u32 kcq_cid_addr; + + struct cnic_dma kwq_info; + struct kwqe **kwq; + + struct cnic_dma kwq_16_data_info; + + u16 max_kwq_idx; + + u16 kwq_prod_idx; + u32 kwq_io_addr; + + u16 *kwq_con_idx_ptr; + u16 kwq_con_idx; + + struct cnic_dma kcq_info; + struct kcqe **kcq; + + u16 kcq_prod_idx; + u32 kcq_io_addr; + + void *status_blk; + struct status_block_msix *bnx2_status_blk; + struct host_status_block *bnx2x_status_blk; + + u32 status_blk_num; + u32 int_num; + u32 last_status_idx; + struct tasklet_struct cnic_irq_task; + + struct kcqe *completed_kcq[MAX_COMPLETED_KCQE]; + + struct cnic_sock *csk_tbl; + struct cnic_id_tbl csk_port_tbl; + + struct cnic_dma conn_buf_info; + struct cnic_dma gbl_buf_info; + + struct cnic_iscsi *iscsi_tbl; + struct cnic_context *ctx_tbl; + struct cnic_id_tbl cid_tbl; + int max_iscsi_conn; + atomic_t iscsi_conn; + + /* per connection parameters */ + int num_iscsi_tasks; + int num_ccells; + int task_array_size; + int r2tq_size; + int 
hq_size; + int num_cqs; + + struct cnic_ctx *ctx_arr; + int ctx_blks; + int ctx_blk_size; + int cids_per_blk; + + u32 chip_id; + int func; + u32 shmem_base; + + u32 uio_dev; + struct uio_info *cnic_uinfo; + + struct cnic_ops *cnic_ops; + int (*start_hw)(struct cnic_dev *); + void (*stop_hw)(struct cnic_dev *); + void (*setup_pgtbl)(struct cnic_dev *, + struct cnic_dma *); + int (*alloc_resc)(struct cnic_dev *); + void (*free_resc)(struct cnic_dev *); + int (*start_cm)(struct cnic_dev *); + void (*stop_cm)(struct cnic_dev *); + void (*enable_int)(struct cnic_dev *); + void (*disable_int_sync)(struct cnic_dev *); + void (*ack_int)(struct cnic_dev *); + void (*close_conn)(struct cnic_sock *, u32 opcode); + u16 (*next_idx)(u16); + u16 (*hw_idx)(u16); +}; + +struct bnx2x_bd_chain_next { + u32 addr_lo; + u32 addr_hi; + u8 reserved[8]; +}; + +#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN) +#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT) + +#define CDU_REGION_NUMBER_XCM_AG 2 +#define CDU_REGION_NUMBER_UCM_AG 4 + +#endif + diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h new file mode 100644 index 00000000000..cee80f69445 --- /dev/null +++ b/drivers/net/cnic_defs.h @@ -0,0 +1,580 @@ + +/* cnic.c: Broadcom CNIC core network driver. + * + * Copyright (c) 2006-2009 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + */ + +#ifndef CNIC_DEFS_H +#define CNIC_DEFS_H + +/* KWQ (kernel work queue) request op codes */ +#define L2_KWQE_OPCODE_VALUE_FLUSH (4) + +#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50) +#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51) +#define L4_KWQE_OPCODE_VALUE_CONNECT3 (52) +#define L4_KWQE_OPCODE_VALUE_RESET (53) +#define L4_KWQE_OPCODE_VALUE_CLOSE (54) +#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET (60) +#define L4_KWQE_OPCODE_VALUE_INIT_ULP (61) + +#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG (1) +#define L4_KWQE_OPCODE_VALUE_UPDATE_PG (9) +#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG (14) + +#define L5CM_RAMROD_CMD_ID_BASE (0x80) +#define L5CM_RAMROD_CMD_ID_TCP_CONNECT (L5CM_RAMROD_CMD_ID_BASE + 3) +#define L5CM_RAMROD_CMD_ID_CLOSE (L5CM_RAMROD_CMD_ID_BASE + 12) +#define L5CM_RAMROD_CMD_ID_ABORT (L5CM_RAMROD_CMD_ID_BASE + 13) +#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14) +#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15) + +/* KCQ (kernel completion queue) response op codes */ +#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53) +#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54) +#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE (55) +#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE (56) +#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED (57) +#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED (58) +#define L4_KCQE_OPCODE_VALUE_INIT_ULP (61) + +#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG (1) +#define L4_KCQE_OPCODE_VALUE_UPDATE_PG (9) +#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14) + +/* KCQ (kernel completion queue) completion status */ +#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0) +#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93) + +#define L4_LAYER_CODE (4) +#define L2_LAYER_CODE (2) + +/* + * L4 KCQ CQE + */ +struct l4_kcq { + u32 cid; + u32 pg_cid; + u32 conn_id; + u32 pg_host_opaque; +#if defined(__BIG_ENDIAN) + u16 status; + u16 reserved1; +#elif defined(__LITTLE_ENDIAN) + u16 reserved1; + u16 status; +#endif + u32 reserved2[2]; +#if defined(__BIG_ENDIAN) + u8 flags; +#define 
L4_KCQ_RESERVED3 (0x7<<0) +#define L4_KCQ_RESERVED3_SHIFT 0 +#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */ +#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3 +#define L4_KCQ_LAYER_CODE (0x7<<4) +#define L4_KCQ_LAYER_CODE_SHIFT 4 +#define L4_KCQ_RESERVED4 (0x1<<7) +#define L4_KCQ_RESERVED4_SHIFT 7 + u8 op_code; + u16 qe_self_seq; +#elif defined(__LITTLE_ENDIAN) + u16 qe_self_seq; + u8 op_code; + u8 flags; +#define L4_KCQ_RESERVED3 (0xF<<0) +#define L4_KCQ_RESERVED3_SHIFT 0 +#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */ +#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3 +#define L4_KCQ_LAYER_CODE (0x7<<4) +#define L4_KCQ_LAYER_CODE_SHIFT 4 +#define L4_KCQ_RESERVED4 (0x1<<7) +#define L4_KCQ_RESERVED4_SHIFT 7 +#endif +}; + + +/* + * L4 KCQ CQE PG upload + */ +struct l4_kcq_upload_pg { + u32 pg_cid; +#if defined(__BIG_ENDIAN) + u16 pg_status; + u16 pg_ipid_count; +#elif defined(__LITTLE_ENDIAN) + u16 pg_ipid_count; + u16 pg_status; +#endif + u32 reserved1[5]; +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0) +#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0 +#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4) +#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4 +#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7) +#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7 + u8 op_code; + u16 qe_self_seq; +#elif defined(__LITTLE_ENDIAN) + u16 qe_self_seq; + u8 op_code; + u8 flags; +#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0) +#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0 +#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4) +#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4 +#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7) +#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7 +#endif +}; + + +/* + * Gracefully close the connection request + */ +struct l4_kwq_close_req { +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0) +#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0 +#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4) +#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4 +#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7 + u8 op_code; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_code; + u8 flags; +#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0) +#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0 +#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4) +#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4 +#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7 +#endif + u32 cid; + u32 reserved2[6]; +}; + + +/* + * The first request to be passed in order to establish connection in option2 + */ +struct l4_kwq_connect_req1 { +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0) +#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0 +#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4) +#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4 +#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7 + u8 op_code; + u8 reserved0; + u8 conn_flags; +#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0) +#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0 +#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1) +#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1 +#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2) +#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2 +#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3) +#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3 +#elif defined(__LITTLE_ENDIAN) + u8 conn_flags; +#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0) +#define 
L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0 +#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1) +#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1 +#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2) +#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2 +#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3) +#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3 + u8 reserved0; + u8 op_code; + u8 flags; +#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0) +#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0 +#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4) +#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4 +#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7 +#endif + u32 cid; + u32 pg_cid; + u32 src_ip; + u32 dst_ip; +#if defined(__BIG_ENDIAN) + u16 dst_port; + u16 src_port; +#elif defined(__LITTLE_ENDIAN) + u16 src_port; + u16 dst_port; +#endif +#if defined(__BIG_ENDIAN) + u8 rsrv1[3]; + u8 tcp_flags; +#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0) +#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0 +#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1) +#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1 +#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2) +#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2 +#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3) +#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3 +#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4) +#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4 +#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5) +#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5 +#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6) +#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6 +#elif defined(__LITTLE_ENDIAN) + u8 tcp_flags; +#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0) +#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0 +#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1) +#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1 +#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2) +#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2 +#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3) +#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3 +#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4) +#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4 +#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5) +#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5 +#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6) +#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6 + u8 rsrv1[3]; +#endif + u32 rsrv2; +}; + + +/* + * The second ( optional )request to be passed in order to establish + * connection in option2 - for IPv6 only + */ +struct l4_kwq_connect_req2 { +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0) +#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0 +#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4) +#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4 +#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7 + u8 op_code; + u8 reserved0; + u8 rsrv; +#elif defined(__LITTLE_ENDIAN) + u8 rsrv; + u8 reserved0; + u8 op_code; + u8 flags; +#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0) +#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0 +#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4) +#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4 +#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7 +#endif + u32 reserved2; + u32 src_ip_v6_2; + u32 src_ip_v6_3; + u32 src_ip_v6_4; + u32 dst_ip_v6_2; + u32 dst_ip_v6_3; + u32 dst_ip_v6_4; +}; + + +/* + * The third ( and last )request to be passed in order to establish + * connection in option2 + */ +struct l4_kwq_connect_req3 { 
+#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0) +#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0 +#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4) +#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4 +#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7 + u8 op_code; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_code; + u8 flags; +#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0) +#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0 +#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4) +#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4 +#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7 +#endif + u32 ka_timeout; + u32 ka_interval ; +#if defined(__BIG_ENDIAN) + u8 snd_seq_scale; + u8 ttl; + u8 tos; + u8 ka_max_probe_count; +#elif defined(__LITTLE_ENDIAN) + u8 ka_max_probe_count; + u8 tos; + u8 ttl; + u8 snd_seq_scale; +#endif +#if defined(__BIG_ENDIAN) + u16 pmtu; + u16 mss; +#elif defined(__LITTLE_ENDIAN) + u16 mss; + u16 pmtu; +#endif + u32 rcv_buf; + u32 snd_buf; + u32 seed; +}; + + +/* + * a KWQE request to offload a PG connection + */ +struct l4_kwq_offload_pg { +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0) +#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0 +#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4) +#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4 +#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7 + u8 op_code; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_code; + u8 flags; +#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0) +#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0 +#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4) +#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4 +#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7 +#endif +#if defined(__BIG_ENDIAN) + u8 l2hdr_nbytes; + u8 pg_flags; +#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0) +#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0 +#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1) +#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1 +#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2) +#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2 + u8 da0; + u8 da1; +#elif defined(__LITTLE_ENDIAN) + u8 da1; + u8 da0; + u8 pg_flags; +#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0) +#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0 +#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1) +#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1 +#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2) +#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2 + u8 l2hdr_nbytes; +#endif +#if defined(__BIG_ENDIAN) + u8 da2; + u8 da3; + u8 da4; + u8 da5; +#elif defined(__LITTLE_ENDIAN) + u8 da5; + u8 da4; + u8 da3; + u8 da2; +#endif +#if defined(__BIG_ENDIAN) + u8 sa0; + u8 sa1; + u8 sa2; + u8 sa3; +#elif defined(__LITTLE_ENDIAN) + u8 sa3; + u8 sa2; + u8 sa1; + u8 sa0; +#endif +#if defined(__BIG_ENDIAN) + u8 sa4; + u8 sa5; + u16 etype; +#elif defined(__LITTLE_ENDIAN) + u16 etype; + u8 sa5; + u8 sa4; +#endif +#if defined(__BIG_ENDIAN) + u16 vlan_tag; + u16 ipid_start; +#elif defined(__LITTLE_ENDIAN) + u16 ipid_start; + u16 vlan_tag; +#endif +#if defined(__BIG_ENDIAN) + u16 ipid_count; + u16 reserved3; +#elif defined(__LITTLE_ENDIAN) + u16 reserved3; + u16 ipid_count; +#endif + u32 host_opaque; +}; + + +/* + * Abortively close the connection request + */ +struct l4_kwq_reset_req { +#if defined(__BIG_ENDIAN) + u8 flags; 
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0) +#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0 +#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4) +#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4 +#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7 + u8 op_code; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_code; + u8 flags; +#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0) +#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0 +#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4) +#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4 +#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7 +#endif + u32 cid; + u32 reserved2[6]; +}; + + +/* + * a KWQE request to update a PG connection + */ +struct l4_kwq_update_pg { +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0) +#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0 +#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4) +#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4 +#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7 + u8 opcode; + u16 oper16; +#elif defined(__LITTLE_ENDIAN) + u16 oper16; + u8 opcode; + u8 flags; +#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0) +#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0 +#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4) +#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4 +#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7 +#endif + u32 pg_cid; + u32 pg_host_opaque; +#if defined(__BIG_ENDIAN) + u8 pg_valids; +#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0) +#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0 +#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1) +#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1 +#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2) +#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2 + u8 pg_unused_a; + u16 pg_ipid_count; +#elif defined(__LITTLE_ENDIAN) + u16 pg_ipid_count; + u8 pg_unused_a; + u8 pg_valids; +#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0) +#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0 +#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1) +#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1 +#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2) +#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2 +#endif +#if defined(__BIG_ENDIAN) + u16 reserverd3; + u8 da0; + u8 da1; +#elif defined(__LITTLE_ENDIAN) + u8 da1; + u8 da0; + u16 reserverd3; +#endif +#if defined(__BIG_ENDIAN) + u8 da2; + u8 da3; + u8 da4; + u8 da5; +#elif defined(__LITTLE_ENDIAN) + u8 da5; + u8 da4; + u8 da3; + u8 da2; +#endif + u32 reserved4; + u32 reserved5; +}; + + +/* + * a KWQE request to upload a PG or L4 context + */ +struct l4_kwq_upload { +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0) +#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0 +#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4) +#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4 +#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7 + u8 opcode; + u16 oper16; +#elif defined(__LITTLE_ENDIAN) + u16 oper16; + u8 opcode; + u8 flags; +#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0) +#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0 +#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4) +#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4 +#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7 +#endif + u32 cid; + u32 reserved2[6]; +}; + +#endif /* CNIC_DEFS_H */ diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h new file mode 100644 index 
00000000000..06380963a34 --- /dev/null +++ b/drivers/net/cnic_if.h @@ -0,0 +1,299 @@ +/* cnic_if.h: Broadcom CNIC core network driver. + * + * Copyright (c) 2006 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + */ + + +#ifndef CNIC_IF_H +#define CNIC_IF_H + +#define CNIC_MODULE_VERSION "2.0.0" +#define CNIC_MODULE_RELDATE "May 21, 2009" + +#define CNIC_ULP_RDMA 0 +#define CNIC_ULP_ISCSI 1 +#define CNIC_ULP_L4 2 +#define MAX_CNIC_ULP_TYPE_EXT 2 +#define MAX_CNIC_ULP_TYPE 3 + +struct kwqe { + u32 kwqe_op_flag; + +#define KWQE_OPCODE_MASK 0x00ff0000 +#define KWQE_OPCODE_SHIFT 16 +#define KWQE_FLAGS_LAYER_SHIFT 28 +#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT) + + u32 kwqe_info0; + u32 kwqe_info1; + u32 kwqe_info2; + u32 kwqe_info3; + u32 kwqe_info4; + u32 kwqe_info5; + u32 kwqe_info6; +}; + +struct kwqe_16 { + u32 kwqe_info0; + u32 kwqe_info1; + u32 kwqe_info2; + u32 kwqe_info3; +}; + +struct kcqe { + u32 kcqe_info0; + u32 kcqe_info1; + u32 kcqe_info2; + u32 kcqe_info3; + u32 kcqe_info4; + u32 kcqe_info5; + u32 kcqe_info6; + u32 kcqe_op_flag; + #define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */ + #define KCQE_FLAGS_LAYER_MASK (0x7<<28) + #define KCQE_FLAGS_LAYER_MASK_MISC (0<<28) + #define KCQE_FLAGS_LAYER_MASK_L2 (2<<28) + #define KCQE_FLAGS_LAYER_MASK_L3 (3<<28) + #define KCQE_FLAGS_LAYER_MASK_L4 (4<<28) + #define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28) + #define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28) + #define KCQE_FLAGS_NEXT (1<<31) + #define KCQE_FLAGS_OPCODE_MASK (0xff<<16) + #define KCQE_FLAGS_OPCODE_SHIFT (16) + #define KCQE_OPCODE(op) \ + (((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT) +}; + +#define MAX_CNIC_CTL_DATA 64 +#define MAX_DRV_CTL_DATA 64 + +#define CNIC_CTL_STOP_CMD 1 +#define CNIC_CTL_START_CMD 2 +#define CNIC_CTL_COMPLETION_CMD 3 + +#define DRV_CTL_IO_WR_CMD 0x101 +#define DRV_CTL_IO_RD_CMD 0x102 +#define DRV_CTL_CTX_WR_CMD 0x103 +#define DRV_CTL_CTXTBL_WR_CMD 0x104 +#define DRV_CTL_COMPLETION_CMD 0x105 + +struct cnic_ctl_completion { + u32 cid; +}; + +struct drv_ctl_completion { + u32 comp_count; +}; + +struct cnic_ctl_info { + int cmd; + union { + struct cnic_ctl_completion comp; + char bytes[MAX_CNIC_CTL_DATA]; + } data; +}; + +struct drv_ctl_io { + u32 cid_addr; + u32 offset; + u32 data; + dma_addr_t dma_addr; +}; + +struct drv_ctl_info { + int cmd; + union { + struct drv_ctl_completion comp; + struct drv_ctl_io io; + char bytes[MAX_DRV_CTL_DATA]; + } data; +}; + +struct cnic_ops { + struct module *cnic_owner; + /* Calls to these functions are protected by RCU. When + * unregistering, we wait for any calls to complete before + * continuing. 
+ */ + int (*cnic_handler)(void *, void *); + int (*cnic_ctl)(void *, struct cnic_ctl_info *); +}; + +#define MAX_CNIC_VEC 8 + +struct cnic_irq { + unsigned int vector; + void *status_blk; + u32 status_blk_num; + u32 irq_flags; +#define CNIC_IRQ_FL_MSIX 0x00000001 +}; + +struct cnic_eth_dev { + struct module *drv_owner; + u32 drv_state; +#define CNIC_DRV_STATE_REGD 0x00000001 +#define CNIC_DRV_STATE_USING_MSIX 0x00000002 + u32 chip_id; + u32 max_kwqe_pending; + struct pci_dev *pdev; + void __iomem *io_base; + + u32 ctx_tbl_offset; + u32 ctx_tbl_len; + int ctx_blk_size; + u32 starting_cid; + u32 max_iscsi_conn; + u32 max_fcoe_conn; + u32 max_rdma_conn; + u32 reserved0[2]; + + int num_irq; + struct cnic_irq irq_arr[MAX_CNIC_VEC]; + int (*drv_register_cnic)(struct net_device *, + struct cnic_ops *, void *); + int (*drv_unregister_cnic)(struct net_device *); + int (*drv_submit_kwqes_32)(struct net_device *, + struct kwqe *[], u32); + int (*drv_submit_kwqes_16)(struct net_device *, + struct kwqe_16 *[], u32); + int (*drv_ctl)(struct net_device *, struct drv_ctl_info *); + unsigned long reserved1[2]; +}; + +struct cnic_sockaddr { + union { + struct sockaddr_in v4; + struct sockaddr_in6 v6; + } local; + union { + struct sockaddr_in v4; + struct sockaddr_in6 v6; + } remote; +}; + +struct cnic_sock { + struct cnic_dev *dev; + void *context; + u32 src_ip[4]; + u32 dst_ip[4]; + u16 src_port; + u16 dst_port; + u16 vlan_id; + unsigned char old_ha[6]; + unsigned char ha[6]; + u32 mtu; + u32 cid; + u32 l5_cid; + u32 pg_cid; + int ulp_type; + + u32 ka_timeout; + u32 ka_interval; + u8 ka_max_probe_count; + u8 tos; + u8 ttl; + u8 snd_seq_scale; + u32 rcv_buf; + u32 snd_buf; + u32 seed; + + unsigned long tcp_flags; +#define SK_TCP_NO_DELAY_ACK 0x1 +#define SK_TCP_KEEP_ALIVE 0x2 +#define SK_TCP_NAGLE 0x4 +#define SK_TCP_TIMESTAMP 0x8 +#define SK_TCP_SACK 0x10 +#define SK_TCP_SEG_SCALING 0x20 + unsigned long flags; +#define SK_F_INUSE 0 +#define SK_F_OFFLD_COMPLETE 1 +#define SK_F_OFFLD_SCHED 2 +#define SK_F_PG_OFFLD_COMPLETE 3 +#define SK_F_CONNECT_START 4 +#define SK_F_IPV6 5 +#define SK_F_CLOSING 7 + + atomic_t ref_count; + u32 state; + struct kwqe kwqe1; + struct kwqe kwqe2; + struct kwqe kwqe3; +}; + +struct cnic_dev { + struct net_device *netdev; + struct pci_dev *pcidev; + void __iomem *regview; + struct list_head list; + + int (*register_device)(struct cnic_dev *dev, int ulp_type, + void *ulp_ctx); + int (*unregister_device)(struct cnic_dev *dev, int ulp_type); + int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num_wqes); + int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[], + u32 num_wqes); + + int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **, + void *); + int (*cm_destroy)(struct cnic_sock *); + int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *); + int (*cm_abort)(struct cnic_sock *); + int (*cm_close)(struct cnic_sock *); + struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type); + int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type, + char *data, u16 data_size); + unsigned long flags; +#define CNIC_F_CNIC_UP 1 +#define CNIC_F_BNX2_CLASS 3 +#define CNIC_F_BNX2X_CLASS 4 + atomic_t ref_count; + u8 mac_addr[6]; + + int max_iscsi_conn; + int max_fcoe_conn; + int max_rdma_conn; + + void *cnic_priv; +}; + +#define CNIC_WR(dev, off, val) writel(val, dev->regview + off) +#define CNIC_WR16(dev, off, val) writew(val, dev->regview + off) +#define CNIC_WR8(dev, off, val) writeb(val, dev->regview + off) +#define 
CNIC_RD(dev, off) readl(dev->regview + off) +#define CNIC_RD16(dev, off) readw(dev->regview + off) + +struct cnic_ulp_ops { + /* Calls to these functions are protected by RCU. When + * unregistering, we wait for any calls to complete before + * continuing. + */ + + void (*cnic_init)(struct cnic_dev *dev); + void (*cnic_exit)(struct cnic_dev *dev); + void (*cnic_start)(void *ulp_ctx); + void (*cnic_stop)(void *ulp_ctx); + void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[], + u32 num_cqes); + void (*indicate_netevent)(void *ulp_ctx, unsigned long event); + void (*cm_connect_complete)(struct cnic_sock *); + void (*cm_close_complete)(struct cnic_sock *); + void (*cm_abort_complete)(struct cnic_sock *); + void (*cm_remote_close)(struct cnic_sock *); + void (*cm_remote_abort)(struct cnic_sock *); + void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type, + char *data, u16 data_size); + struct module *owner; +}; + +extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops); + +extern int cnic_unregister_driver(int ulp_type); + +#endif -- cgit v1.2.3 From 9499f5e7ed5224c40706f0cec6542a9916bc7606 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 12 Jun 2009 22:16:35 -0600 Subject: virtio: add names to virtqueue struct, mapping from devices to queues. Add a linked list of all virtqueues for a virtio device: this helps for debugging and is also needed for upcoming interface change. Also, add a "name" field for clearer debug messages. Signed-off-by: Rusty Russell --- drivers/block/virtio_blk.c | 2 +- drivers/char/hw_random/virtio-rng.c | 2 +- drivers/char/virtio_console.c | 4 ++-- drivers/lguest/lguest_device.c | 5 +++-- drivers/net/virtio_net.c | 6 +++--- drivers/s390/kvm/kvm_virtio.c | 7 ++++--- drivers/virtio/virtio.c | 2 ++ drivers/virtio/virtio_balloon.c | 4 ++-- drivers/virtio/virtio_pci.c | 5 +++-- drivers/virtio/virtio_ring.c | 27 ++++++++++++++++++++------- include/linux/virtio.h | 12 ++++++++---- include/linux/virtio_config.h | 6 ++++-- include/linux/virtio_ring.h | 3 ++- net/9p/trans_virtio.c | 2 +- 14 files changed, 56 insertions(+), 31 deletions(-) (limited to 'drivers/net') diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index c0facaa55cf..db55a50d9f6 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -288,7 +288,7 @@ static int virtblk_probe(struct virtio_device *vdev) sg_init_table(vblk->sg, vblk->sg_elems); /* We expect one virtqueue, for output. */ - vblk->vq = vdev->config->find_vq(vdev, 0, blk_done); + vblk->vq = vdev->config->find_vq(vdev, 0, blk_done, "requests"); if (IS_ERR(vblk->vq)) { err = PTR_ERR(vblk->vq); goto out_free_vblk; diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 86e83f88313..2aeafcea95f 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -94,7 +94,7 @@ static int virtrng_probe(struct virtio_device *vdev) int err; /* We expect a single virtqueue. */ - vq = vdev->config->find_vq(vdev, 0, random_recv_done); + vq = vdev->config->find_vq(vdev, 0, random_recv_done, "input"); if (IS_ERR(vq)) return PTR_ERR(vq); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index ff6f5a4b58f..58684e4a081 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -202,13 +202,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev) /* Find the input queue. */ /* FIXME: This is why we want to wean off hvc: we do nothing * when input comes in. 
*/ - in_vq = vdev->config->find_vq(vdev, 0, hvc_handle_input); + in_vq = vdev->config->find_vq(vdev, 0, hvc_handle_input, "input"); if (IS_ERR(in_vq)) { err = PTR_ERR(in_vq); goto free; } - out_vq = vdev->config->find_vq(vdev, 1, NULL); + out_vq = vdev->config->find_vq(vdev, 1, NULL, "output"); if (IS_ERR(out_vq)) { err = PTR_ERR(out_vq); goto free_in_vq; diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c index df44d962626..4babed899d5 100644 --- a/drivers/lguest/lguest_device.c +++ b/drivers/lguest/lguest_device.c @@ -228,7 +228,8 @@ extern void lguest_setup_irq(unsigned int irq); * function. */ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, unsigned index, - void (*callback)(struct virtqueue *vq)) + void (*callback)(struct virtqueue *vq), + const char *name) { struct lguest_device *ldev = to_lgdev(vdev); struct lguest_vq_info *lvq; @@ -263,7 +264,7 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, /* OK, tell virtio_ring.c to set up a virtqueue now we know its size * and we've got a pointer to its pages. */ vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, - vdev, lvq->pages, lg_notify, callback); + vdev, lvq->pages, lg_notify, callback, name); if (!vq) { err = -ENOMEM; goto unmap; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 4d1d47953fc..be3b734ff5a 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -906,20 +906,20 @@ static int virtnet_probe(struct virtio_device *vdev) vi->mergeable_rx_bufs = true; /* We expect two virtqueues, receive then send. */ - vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); + vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done, "input"); if (IS_ERR(vi->rvq)) { err = PTR_ERR(vi->rvq); goto free; } - vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done); + vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done, "output"); if (IS_ERR(vi->svq)) { err = PTR_ERR(vi->svq); goto free_recv; } if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { - vi->cvq = vdev->config->find_vq(vdev, 2, NULL); + vi->cvq = vdev->config->find_vq(vdev, 2, NULL, "control"); if (IS_ERR(vi->cvq)) { err = PTR_ERR(vi->svq); goto free_send; diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index cbc8566fab7..ba8995fbf04 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c @@ -173,8 +173,9 @@ static void kvm_notify(struct virtqueue *vq) * this device and sets it up. */ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, - unsigned index, - void (*callback)(struct virtqueue *vq)) + unsigned index, + void (*callback)(struct virtqueue *vq), + const char *name) { struct kvm_device *kdev = to_kvmdev(vdev); struct kvm_vqconfig *config; @@ -194,7 +195,7 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN, vdev, (void *) config->address, - kvm_notify, callback); + kvm_notify, callback, name); if (!vq) { err = -ENOMEM; goto unmap; diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 6b681036486..3f52c767dfe 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -186,6 +186,8 @@ int register_virtio_device(struct virtio_device *dev) /* Acknowledge that we've seen the device. */ add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); + INIT_LIST_HEAD(&dev->vqs); + /* device_register() causes the bus infrastructure to look for a * matching driver. 
*/ err = device_register(&dev->dev); diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 9c76a061a04..0fa73b4d18b 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -218,13 +218,13 @@ static int virtballoon_probe(struct virtio_device *vdev) vb->vdev = vdev; /* We expect two virtqueues. */ - vb->inflate_vq = vdev->config->find_vq(vdev, 0, balloon_ack); + vb->inflate_vq = vdev->config->find_vq(vdev, 0, balloon_ack, "inflate"); if (IS_ERR(vb->inflate_vq)) { err = PTR_ERR(vb->inflate_vq); goto out_free_vb; } - vb->deflate_vq = vdev->config->find_vq(vdev, 1, balloon_ack); + vb->deflate_vq = vdev->config->find_vq(vdev, 1, balloon_ack, "deflate"); if (IS_ERR(vb->deflate_vq)) { err = PTR_ERR(vb->deflate_vq); goto out_del_inflate_vq; diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index 330aacbdec1..be4047abd5b 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c @@ -208,7 +208,8 @@ static irqreturn_t vp_interrupt(int irq, void *opaque) /* the config->find_vq() implementation */ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, - void (*callback)(struct virtqueue *vq)) + void (*callback)(struct virtqueue *vq), + const char *name) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_vq_info *info; @@ -247,7 +248,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, /* create the vring */ vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, - vdev, info->queue, vp_notify, callback); + vdev, info->queue, vp_notify, callback, name); if (!vq) { err = -ENOMEM; goto out_activate_queue; diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 5c52369ab9b..579fa693d5d 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -23,21 +23,30 @@ #ifdef DEBUG /* For development, we want to crash whenever the ring is screwed. */ -#define BAD_RING(_vq, fmt...) \ - do { dev_err(&(_vq)->vq.vdev->dev, fmt); BUG(); } while(0) +#define BAD_RING(_vq, fmt, args...) \ + do { \ + dev_err(&(_vq)->vq.vdev->dev, \ + "%s:"fmt, (_vq)->vq.name, ##args); \ + BUG(); \ + } while (0) /* Caller is supposed to guarantee no reentry. */ #define START_USE(_vq) \ do { \ if ((_vq)->in_use) \ - panic("in_use = %i\n", (_vq)->in_use); \ + panic("%s:in_use = %i\n", \ + (_vq)->vq.name, (_vq)->in_use); \ (_vq)->in_use = __LINE__; \ mb(); \ - } while(0) + } while (0) #define END_USE(_vq) \ do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0) #else -#define BAD_RING(_vq, fmt...) \ - do { dev_err(&_vq->vq.vdev->dev, fmt); (_vq)->broken = true; } while(0) +#define BAD_RING(_vq, fmt, args...) 
\ + do { \ + dev_err(&_vq->vq.vdev->dev, \ + "%s:"fmt, (_vq)->vq.name, ##args); \ + (_vq)->broken = true; \ + } while (0) #define START_USE(vq) #define END_USE(vq) #endif @@ -284,7 +293,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, struct virtio_device *vdev, void *pages, void (*notify)(struct virtqueue *), - void (*callback)(struct virtqueue *)) + void (*callback)(struct virtqueue *), + const char *name) { struct vring_virtqueue *vq; unsigned int i; @@ -303,10 +313,12 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, vq->vq.callback = callback; vq->vq.vdev = vdev; vq->vq.vq_ops = &vring_vq_ops; + vq->vq.name = name; vq->notify = notify; vq->broken = false; vq->last_used_idx = 0; vq->num_added = 0; + list_add_tail(&vq->vq.list, &vdev->vqs); #ifdef DEBUG vq->in_use = false; #endif @@ -327,6 +339,7 @@ EXPORT_SYMBOL_GPL(vring_new_virtqueue); void vring_del_virtqueue(struct virtqueue *vq) { + list_del(&vq->list); kfree(to_vvq(vq)); } EXPORT_SYMBOL_GPL(vring_del_virtqueue); diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 9410394bbf9..4fca4f5440b 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -10,14 +10,17 @@ /** * virtqueue - a queue to register buffers for sending or receiving. + * @list: the chain of virtqueues for this device * @callback: the function to call when buffers are consumed (can be NULL). + * @name: the name of this virtqueue (mainly for debugging) * @vdev: the virtio device this queue was created for. * @vq_ops: the operations for this virtqueue (see below). * @priv: a pointer for the virtqueue implementation to use. */ -struct virtqueue -{ +struct virtqueue { + struct list_head list; void (*callback)(struct virtqueue *vq); + const char *name; struct virtio_device *vdev; struct virtqueue_ops *vq_ops; void *priv; @@ -76,15 +79,16 @@ struct virtqueue_ops { * @dev: underlying device. * @id: the device type identification (used to match it with a driver). * @config: the configuration ops for this device. + * @vqs: the list of virtqueues for this device. * @features: the features supported by both driver and device. * @priv: private pointer for the driver's use. */ -struct virtio_device -{ +struct virtio_device { int index; struct device dev; struct virtio_device_id id; struct virtio_config_ops *config; + struct list_head vqs; /* Note that this is a Linux set_bit-style bitmap. */ unsigned long features[1]; void *priv; diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index bf8ec283b23..9fae274751e 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -55,7 +55,8 @@ * @find_vq: find a virtqueue and instantiate it. * vdev: the virtio_device * index: the 0-based virtqueue number in case there's more than one. - * callback: the virqtueue callback + * callback: the virtqueue callback + * name: the virtqueue name (mainly for debugging) * Returns the new virtqueue or ERR_PTR() (eg. -ENOENT). * @del_vq: free a virtqueue found by find_vq(). * @get_features: get the array of feature bits for this device. 
@@ -77,7 +78,8 @@ struct virtio_config_ops void (*reset)(struct virtio_device *vdev); struct virtqueue *(*find_vq)(struct virtio_device *vdev, unsigned index, - void (*callback)(struct virtqueue *)); + void (*callback)(struct virtqueue *), + const char *name); void (*del_vq)(struct virtqueue *vq); u32 (*get_features)(struct virtio_device *vdev); void (*finalize_features)(struct virtio_device *vdev); diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index 71e03722fb5..166c519689d 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h @@ -119,7 +119,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, struct virtio_device *vdev, void *pages, void (*notify)(struct virtqueue *vq), - void (*callback)(struct virtqueue *vq)); + void (*callback)(struct virtqueue *vq), + const char *name); void vring_del_virtqueue(struct virtqueue *vq); /* Filter out transport-specific feature bits. */ void vring_transport_features(struct virtio_device *vdev); diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index bb8579a141a..ab8791f9aba 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -246,7 +246,7 @@ static int p9_virtio_probe(struct virtio_device *vdev) chan->vdev = vdev; /* We expect one virtqueue, for requests. */ - chan->vq = vdev->config->find_vq(vdev, 0, req_done); + chan->vq = vdev->config->find_vq(vdev, 0, req_done, "requests"); if (IS_ERR(chan->vq)) { err = PTR_ERR(chan->vq); goto out_free_vq; -- cgit v1.2.3 From d2a7ddda9ffb1c8961abff6714b0f1eb925c120f Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 12 Jun 2009 22:16:36 -0600 Subject: virtio: find_vqs/del_vqs virtio operations This replaces find_vq/del_vq with find_vqs/del_vqs virtio operations, and updates all drivers. This is needed for MSI support, because MSI needs to know the total number of vectors upfront. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell (+ lguest/9p compile fixes) --- drivers/block/virtio_blk.c | 6 ++--- drivers/char/hw_random/virtio-rng.c | 6 ++--- drivers/char/virtio_console.c | 26 +++++++++----------- drivers/lguest/lguest_device.c | 36 ++++++++++++++++++++++++++-- drivers/net/virtio_net.c | 45 ++++++++++++++--------------------- drivers/s390/kvm/kvm_virtio.c | 36 ++++++++++++++++++++++++++-- drivers/virtio/virtio_balloon.c | 27 +++++++++------------ drivers/virtio/virtio_pci.c | 37 +++++++++++++++++++++++------ include/linux/virtio_config.h | 47 ++++++++++++++++++++++++++++--------- net/9p/trans_virtio.c | 6 ++--- 10 files changed, 183 insertions(+), 89 deletions(-) (limited to 'drivers/net') diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index db55a50d9f6..07d8e595e51 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -288,7 +288,7 @@ static int virtblk_probe(struct virtio_device *vdev) sg_init_table(vblk->sg, vblk->sg_elems); /* We expect one virtqueue, for output. 
*/ - vblk->vq = vdev->config->find_vq(vdev, 0, blk_done, "requests"); + vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests"); if (IS_ERR(vblk->vq)) { err = PTR_ERR(vblk->vq); goto out_free_vblk; @@ -388,7 +388,7 @@ out_put_disk: out_mempool: mempool_destroy(vblk->pool); out_free_vq: - vdev->config->del_vq(vblk->vq); + vdev->config->del_vqs(vdev); out_free_vblk: kfree(vblk); out: @@ -409,7 +409,7 @@ static void virtblk_remove(struct virtio_device *vdev) blk_cleanup_queue(vblk->disk->queue); put_disk(vblk->disk); mempool_destroy(vblk->pool); - vdev->config->del_vq(vblk->vq); + vdev->config->del_vqs(vdev); kfree(vblk); } diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 2aeafcea95f..f2041fede82 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -94,13 +94,13 @@ static int virtrng_probe(struct virtio_device *vdev) int err; /* We expect a single virtqueue. */ - vq = vdev->config->find_vq(vdev, 0, random_recv_done, "input"); + vq = virtio_find_single_vq(vdev, random_recv_done, "input"); if (IS_ERR(vq)) return PTR_ERR(vq); err = hwrng_register(&virtio_hwrng); if (err) { - vdev->config->del_vq(vq); + vdev->config->del_vqs(vdev); return err; } @@ -112,7 +112,7 @@ static void virtrng_remove(struct virtio_device *vdev) { vdev->config->reset(vdev); hwrng_unregister(&virtio_hwrng); - vdev->config->del_vq(vq); + vdev->config->del_vqs(vdev); } static struct virtio_device_id id_table[] = { diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 58684e4a081..c74dacfa679 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -188,6 +188,9 @@ static void hvc_handle_input(struct virtqueue *vq) * Finally we put our input buffer in the input queue, ready to receive. */ static int __devinit virtcons_probe(struct virtio_device *dev) { + vq_callback_t *callbacks[] = { hvc_handle_input, NULL}; + const char *names[] = { "input", "output" }; + struct virtqueue *vqs[2]; int err; vdev = dev; @@ -199,20 +202,15 @@ static int __devinit virtcons_probe(struct virtio_device *dev) goto fail; } - /* Find the input queue. */ + /* Find the queues. */ /* FIXME: This is why we want to wean off hvc: we do nothing * when input comes in. */ - in_vq = vdev->config->find_vq(vdev, 0, hvc_handle_input, "input"); - if (IS_ERR(in_vq)) { - err = PTR_ERR(in_vq); + err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names); + if (err) goto free; - } - out_vq = vdev->config->find_vq(vdev, 1, NULL, "output"); - if (IS_ERR(out_vq)) { - err = PTR_ERR(out_vq); - goto free_in_vq; - } + in_vq = vqs[0]; + out_vq = vqs[1]; /* Start using the new console output. */ virtio_cons.get_chars = get_chars; @@ -233,17 +231,15 @@ static int __devinit virtcons_probe(struct virtio_device *dev) hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE); if (IS_ERR(hvc)) { err = PTR_ERR(hvc); - goto free_out_vq; + goto free_vqs; } /* Register the input buffer the first time. 
*/ add_inbuf(); return 0; -free_out_vq: - vdev->config->del_vq(out_vq); -free_in_vq: - vdev->config->del_vq(in_vq); +free_vqs: + vdev->config->del_vqs(vdev); free: kfree(inbuf); fail: diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c index 4babed899d5..e082cdac88b 100644 --- a/drivers/lguest/lguest_device.c +++ b/drivers/lguest/lguest_device.c @@ -313,6 +313,38 @@ static void lg_del_vq(struct virtqueue *vq) kfree(lvq); } +static void lg_del_vqs(struct virtio_device *vdev) +{ + struct virtqueue *vq, *n; + + list_for_each_entry_safe(vq, n, &vdev->vqs, list) + lg_del_vq(vq); +} + +static int lg_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) +{ + struct lguest_device *ldev = to_lgdev(vdev); + int i; + + /* We must have this many virtqueues. */ + if (nvqs > ldev->desc->num_vq) + return -ENOENT; + + for (i = 0; i < nvqs; ++i) { + vqs[i] = lg_find_vq(vdev, i, callbacks[i], names[i]); + if (IS_ERR(vqs[i])) + goto error; + } + return 0; + +error: + lg_del_vqs(vdev); + return PTR_ERR(vqs[i]); +} + /* The ops structure which hooks everything together. */ static struct virtio_config_ops lguest_config_ops = { .get_features = lg_get_features, @@ -322,8 +354,8 @@ static struct virtio_config_ops lguest_config_ops = { .get_status = lg_get_status, .set_status = lg_set_status, .reset = lg_reset, - .find_vq = lg_find_vq, - .del_vq = lg_del_vq, + .find_vqs = lg_find_vqs, + .del_vqs = lg_del_vqs, }; /* The root device for the lguest virtio devices. This makes them appear as diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index be3b734ff5a..7fa620ddeb2 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -845,6 +845,10 @@ static int virtnet_probe(struct virtio_device *vdev) int err; struct net_device *dev; struct virtnet_info *vi; + struct virtqueue *vqs[3]; + vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL}; + const char *names[] = { "input", "output", "control" }; + int nvqs; /* Allocate ourselves a network device with room for our info */ dev = alloc_etherdev(sizeof(struct virtnet_info)); @@ -905,25 +909,19 @@ static int virtnet_probe(struct virtio_device *vdev) if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) vi->mergeable_rx_bufs = true; - /* We expect two virtqueues, receive then send. */ - vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done, "input"); - if (IS_ERR(vi->rvq)) { - err = PTR_ERR(vi->rvq); + /* We expect two virtqueues, receive then send, + * and optionally control. */ + nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2; + + err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names); + if (err) goto free; - } - vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done, "output"); - if (IS_ERR(vi->svq)) { - err = PTR_ERR(vi->svq); - goto free_recv; - } + vi->rvq = vqs[0]; + vi->svq = vqs[1]; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { - vi->cvq = vdev->config->find_vq(vdev, 2, NULL, "control"); - if (IS_ERR(vi->cvq)) { - err = PTR_ERR(vi->svq); - goto free_send; - } + vi->cvq = vqs[2]; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) dev->features |= NETIF_F_HW_VLAN_FILTER; @@ -941,7 +939,7 @@ static int virtnet_probe(struct virtio_device *vdev) err = register_netdev(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); - goto free_ctrl; + goto free_vqs; } /* Last of all, set up some receive buffers. 
*/ @@ -962,13 +960,8 @@ static int virtnet_probe(struct virtio_device *vdev) unregister: unregister_netdev(dev); -free_ctrl: - if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) - vdev->config->del_vq(vi->cvq); -free_send: - vdev->config->del_vq(vi->svq); -free_recv: - vdev->config->del_vq(vi->rvq); +free_vqs: + vdev->config->del_vqs(vdev); free: free_netdev(dev); return err; @@ -994,12 +987,10 @@ static void virtnet_remove(struct virtio_device *vdev) BUG_ON(vi->num != 0); - vdev->config->del_vq(vi->svq); - vdev->config->del_vq(vi->rvq); - if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) - vdev->config->del_vq(vi->cvq); unregister_netdev(vi->dev); + vdev->config->del_vqs(vi->vdev); + while (vi->pages) __free_pages(get_a_page(vi, GFP_KERNEL), 0); diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index ba8995fbf04..e38e5d306fa 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c @@ -227,6 +227,38 @@ static void kvm_del_vq(struct virtqueue *vq) KVM_S390_VIRTIO_RING_ALIGN)); } +static void kvm_del_vqs(struct virtio_device *vdev) +{ + struct virtqueue *vq, *n; + + list_for_each_entry_safe(vq, n, &vdev->vqs, list) + kvm_del_vq(vq); +} + +static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) +{ + struct kvm_device *kdev = to_kvmdev(vdev); + int i; + + /* We must have this many virtqueues. */ + if (nvqs > kdev->desc->num_vq) + return -ENOENT; + + for (i = 0; i < nvqs; ++i) { + vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]); + if (IS_ERR(vqs[i])) + goto error; + } + return 0; + +error: + kvm_del_vqs(vdev); + return PTR_ERR(vqs[i]); +} + /* * The config ops structure as defined by virtio config */ @@ -238,8 +270,8 @@ static struct virtio_config_ops kvm_vq_configspace_ops = { .get_status = kvm_get_status, .set_status = kvm_set_status, .reset = kvm_reset, - .find_vq = kvm_find_vq, - .del_vq = kvm_del_vq, + .find_vqs = kvm_find_vqs, + .del_vqs = kvm_del_vqs, }; /* diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 0fa73b4d18b..26b27826479 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -204,6 +204,9 @@ static int balloon(void *_vballoon) static int virtballoon_probe(struct virtio_device *vdev) { struct virtio_balloon *vb; + struct virtqueue *vqs[2]; + vq_callback_t *callbacks[] = { balloon_ack, balloon_ack }; + const char *names[] = { "inflate", "deflate" }; int err; vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); @@ -218,22 +221,17 @@ static int virtballoon_probe(struct virtio_device *vdev) vb->vdev = vdev; /* We expect two virtqueues. 
*/ - vb->inflate_vq = vdev->config->find_vq(vdev, 0, balloon_ack, "inflate"); - if (IS_ERR(vb->inflate_vq)) { - err = PTR_ERR(vb->inflate_vq); + err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names); + if (err) goto out_free_vb; - } - vb->deflate_vq = vdev->config->find_vq(vdev, 1, balloon_ack, "deflate"); - if (IS_ERR(vb->deflate_vq)) { - err = PTR_ERR(vb->deflate_vq); - goto out_del_inflate_vq; - } + vb->inflate_vq = vqs[0]; + vb->deflate_vq = vqs[1]; vb->thread = kthread_run(balloon, vb, "vballoon"); if (IS_ERR(vb->thread)) { err = PTR_ERR(vb->thread); - goto out_del_deflate_vq; + goto out_del_vqs; } vb->tell_host_first @@ -241,10 +239,8 @@ static int virtballoon_probe(struct virtio_device *vdev) return 0; -out_del_deflate_vq: - vdev->config->del_vq(vb->deflate_vq); -out_del_inflate_vq: - vdev->config->del_vq(vb->inflate_vq); +out_del_vqs: + vdev->config->del_vqs(vdev); out_free_vb: kfree(vb); out: @@ -264,8 +260,7 @@ static void virtballoon_remove(struct virtio_device *vdev) /* Now we reset the device so we can clean up the queues. */ vdev->config->reset(vdev); - vdev->config->del_vq(vb->deflate_vq); - vdev->config->del_vq(vb->inflate_vq); + vdev->config->del_vqs(vdev); kfree(vb); } diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index be4047abd5b..027f13fbe49 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c @@ -276,11 +276,7 @@ static void vp_del_vq(struct virtqueue *vq) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); struct virtio_pci_vq_info *info = vq->priv; - unsigned long flags, size; - - spin_lock_irqsave(&vp_dev->lock, flags); - list_del(&info->node); - spin_unlock_irqrestore(&vp_dev->lock, flags); + unsigned long size; vring_del_virtqueue(vq); @@ -293,14 +289,41 @@ static void vp_del_vq(struct virtqueue *vq) kfree(info); } +static void vp_del_vqs(struct virtio_device *vdev) +{ + struct virtqueue *vq, *n; + + list_for_each_entry_safe(vq, n, &vdev->vqs, list) + vp_del_vq(vq); +} + +static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) +{ + int i; + + for (i = 0; i < nvqs; ++i) { + vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i]); + if (IS_ERR(vqs[i])) + goto error; + } + return 0; + +error: + vp_del_vqs(vdev); + return PTR_ERR(vqs[i]); +} + static struct virtio_config_ops virtio_pci_config_ops = { .get = vp_get, .set = vp_set, .get_status = vp_get_status, .set_status = vp_set_status, .reset = vp_reset, - .find_vq = vp_find_vq, - .del_vq = vp_del_vq, + .find_vqs = vp_find_vqs, + .del_vqs = vp_del_vqs, .get_features = vp_get_features, .finalize_features = vp_finalize_features, }; diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 9fae274751e..4cd290c06a8 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -29,6 +29,7 @@ #define VIRTIO_F_NOTIFY_ON_EMPTY 24 #ifdef __KERNEL__ +#include #include /** @@ -49,16 +50,26 @@ * @set_status: write the status byte * vdev: the virtio_device * status: the new status byte + * @request_vqs: request the specified number of virtqueues + * vdev: the virtio_device + * max_vqs: the max number of virtqueues we want + * If supplied, must call before any virtqueues are instantiated. + * To modify the max number of virtqueues after request_vqs has been + * called, call free_vqs and then request_vqs with a new value. 
+ * @free_vqs: cleanup resources allocated by request_vqs + * vdev: the virtio_device + * If supplied, must call after all virtqueues have been deleted. * @reset: reset the device * vdev: the virtio device * After this, status and feature negotiation must be done again - * @find_vq: find a virtqueue and instantiate it. + * @find_vqs: find virtqueues and instantiate them. * vdev: the virtio_device - * index: the 0-based virtqueue number in case there's more than one. - * callback: the virtqueue callback - * name: the virtqueue name (mainly for debugging) - * Returns the new virtqueue or ERR_PTR() (eg. -ENOENT). - * @del_vq: free a virtqueue found by find_vq(). + * nvqs: the number of virtqueues to find + * vqs: on success, includes new virtqueues + * callbacks: array of callbacks, for each virtqueue + * names: array of virtqueue names (mainly for debugging) + * Returns 0 on success or error status + * @del_vqs: free virtqueues found by find_vqs(). * @get_features: get the array of feature bits for this device. * vdev: the virtio_device * Returns the first 32 feature bits (all we currently need). @@ -67,6 +78,7 @@ * This gives the final feature bits for the device: it can change * the dev->feature bits if it wants. */ +typedef void vq_callback_t(struct virtqueue *); struct virtio_config_ops { void (*get)(struct virtio_device *vdev, unsigned offset, @@ -76,11 +88,11 @@ struct virtio_config_ops u8 (*get_status)(struct virtio_device *vdev); void (*set_status)(struct virtio_device *vdev, u8 status); void (*reset)(struct virtio_device *vdev); - struct virtqueue *(*find_vq)(struct virtio_device *vdev, - unsigned index, - void (*callback)(struct virtqueue *), - const char *name); - void (*del_vq)(struct virtqueue *vq); + int (*find_vqs)(struct virtio_device *, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]); + void (*del_vqs)(struct virtio_device *); u32 (*get_features)(struct virtio_device *vdev); void (*finalize_features)(struct virtio_device *vdev); }; @@ -128,5 +140,18 @@ static inline int virtio_config_buf(struct virtio_device *vdev, vdev->config->get(vdev, offset, buf, len); return 0; } + +static inline +struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, + vq_callback_t *c, const char *n) +{ + vq_callback_t *callbacks[] = { c }; + const char *names[] = { n }; + struct virtqueue *vq; + int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names); + if (err < 0) + return ERR_PTR(err); + return vq; +} #endif /* __KERNEL__ */ #endif /* _LINUX_VIRTIO_CONFIG_H */ diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index ab8791f9aba..a49484e67e1 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -246,7 +246,7 @@ static int p9_virtio_probe(struct virtio_device *vdev) chan->vdev = vdev; /* We expect one virtqueue, for requests. 
*/ - chan->vq = vdev->config->find_vq(vdev, 0, req_done, "requests"); + chan->vq = virtio_find_single_vq(vdev, req_done, "requests"); if (IS_ERR(chan->vq)) { err = PTR_ERR(chan->vq); goto out_free_vq; @@ -261,7 +261,7 @@ static int p9_virtio_probe(struct virtio_device *vdev) return 0; out_free_vq: - vdev->config->del_vq(chan->vq); + vdev->config->del_vqs(vdev); fail: mutex_lock(&virtio_9p_lock); chan_index--; @@ -332,7 +332,7 @@ static void p9_virtio_remove(struct virtio_device *vdev) BUG_ON(chan->inuse); if (chan->initialized) { - vdev->config->del_vq(chan->vq); + vdev->config->del_vqs(vdev); chan->initialized = false; } } -- cgit v1.2.3 From 4b512d26f425be1c779c8319249b42ce3c3424d2 Mon Sep 17 00:00:00 2001 From: Thadeu Lima de Souza Cascardo Date: Tue, 14 Apr 2009 23:14:10 -0300 Subject: trivial: typo (en|dis|avail|remove)bale -> (en|dis|avail|remove)able Signed-off-by: Thadeu Lima de Souza Cascardo Signed-off-by: Jiri Kosina --- drivers/char/amiserial.c | 2 +- drivers/media/video/hdpvr/hdpvr-video.c | 2 +- drivers/net/b44.h | 2 +- drivers/net/e100.c | 2 +- drivers/net/niu.h | 4 ++-- drivers/scsi/megaraid.h | 2 +- drivers/scsi/megaraid/mbox_defs.h | 2 +- drivers/staging/rt2860/common/mlme.c | 2 +- drivers/staging/rt2870/common/mlme.c | 2 +- drivers/staging/rt3070/common/mlme.c | 2 +- drivers/watchdog/iop_wdt.c | 2 +- sound/pci/aw2/aw2-saa7146.c | 2 +- 12 files changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c index fd3ebd1be57..72429b6b2fa 100644 --- a/drivers/char/amiserial.c +++ b/drivers/char/amiserial.c @@ -779,7 +779,7 @@ static void change_speed(struct async_struct *info, info->IER |= UART_IER_MSI; } /* TBD: - * Does clearing IER_MSI imply that we should disbale the VBL interrupt ? + * Does clearing IER_MSI imply that we should disable the VBL interrupt ? */ /* diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c index 3e6ffee8dfe..ccd47f57f42 100644 --- a/drivers/media/video/hdpvr/hdpvr-video.c +++ b/drivers/media/video/hdpvr/hdpvr-video.c @@ -181,7 +181,7 @@ static int hdpvr_submit_buffers(struct hdpvr_device *dev) buff_list); if (buf->status != BUFSTAT_AVAILABLE) { v4l2_err(&dev->v4l2_dev, - "buffer not marked as availbale\n"); + "buffer not marked as available\n"); ret = -EFAULT; goto err; } diff --git a/drivers/net/b44.h b/drivers/net/b44.h index e678498de6d..d24158e7f30 100644 --- a/drivers/net/b44.h +++ b/drivers/net/b44.h @@ -97,7 +97,7 @@ #define B44_DMARX_STAT 0x021CUL /* DMA RX Current Active Desc. 
+ Status */ #define DMARX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */ #define DMARX_STAT_SMASK 0x0000f000 /* State Mask */ -#define DMARX_STAT_SDISABLED 0x00000000 /* State Disbaled */ +#define DMARX_STAT_SDISABLED 0x00000000 /* State Disabled */ #define DMARX_STAT_SACTIVE 0x00001000 /* State Active */ #define DMARX_STAT_SIDLE 0x00002000 /* State Idle Wait */ #define DMARX_STAT_SSTOPPED 0x00003000 /* State Stopped */ diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 0f9ee134855..af5364f4955 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -2785,7 +2785,7 @@ static int e100_resume(struct pci_dev *pdev) /* ack any pending wake events, disable PME */ pci_enable_wake(pdev, 0, 0); - /* disbale reverse auto-negotiation */ + /* disable reverse auto-negotiation */ if (nic->phy == phy_82552_v) { u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, E100_82552_SMARTSPEED); diff --git a/drivers/net/niu.h b/drivers/net/niu.h index 8754e44cada..3bd0b5933d5 100644 --- a/drivers/net/niu.h +++ b/drivers/net/niu.h @@ -3242,8 +3242,8 @@ struct niu { struct niu_parent *parent; u32 flags; -#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removebale PHY detected*/ -#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removebale PHY */ +#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removeable PHY detected*/ +#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removeable PHY */ #define NIU_FLAGS_VPD_VALID 0x00800000 /* VPD has valid version */ #define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */ #define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */ diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h index 795201fa0b4..512c2cc1a33 100644 --- a/drivers/scsi/megaraid.h +++ b/drivers/scsi/megaraid.h @@ -469,7 +469,7 @@ typedef struct { u8 type; /* Type of the device */ u8 cur_status; /* current status of the device */ u8 tag_depth; /* Level of tagging */ - u8 sync_neg; /* sync negotiation - ENABLE or DISBALE */ + u8 sync_neg; /* sync negotiation - ENABLE or DISABLE */ u32 size; /* configurable size in terms of 512 byte blocks */ }__attribute__ ((packed)) phys_drv; diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h index 170399ef06f..b25b74764ec 100644 --- a/drivers/scsi/megaraid/mbox_defs.h +++ b/drivers/scsi/megaraid/mbox_defs.h @@ -686,7 +686,7 @@ typedef struct { * @type : Type of the device * @cur_status : current status of the device * @tag_depth : Level of tagging - * @sync_neg : sync negotiation - ENABLE or DISBALE + * @sync_neg : sync negotiation - ENABLE or DISABLE * @size : configurable size in terms of 512 byte */ typedef struct { diff --git a/drivers/staging/rt2860/common/mlme.c b/drivers/staging/rt2860/common/mlme.c index c00f9ab9c46..2edf2999f5c 100644 --- a/drivers/staging/rt2860/common/mlme.c +++ b/drivers/staging/rt2860/common/mlme.c @@ -5664,7 +5664,7 @@ VOID AsicUpdateProtect( #if 0 MacReg |= (pAd->CommonCfg.RtsThreshold << 8); #else - // If the user want disable RtsThreshold and enbale Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096 + // If the user want disable RtsThreshold and enable Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096 if (( #ifdef DOT11_N_SUPPORT (pAd->CommonCfg.BACapability.field.AmsduEnable) || diff --git a/drivers/staging/rt2870/common/mlme.c b/drivers/staging/rt2870/common/mlme.c index 8a82cee8bf2..a26bc033337 100644 --- a/drivers/staging/rt2870/common/mlme.c +++ b/drivers/staging/rt2870/common/mlme.c @@ -5561,7 +5561,7 @@ VOID AsicUpdateProtect( #if 0 MacReg |= (pAd->CommonCfg.RtsThreshold 
<< 8); #else - // If the user want disable RtsThreshold and enbale Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096 + // If the user want disable RtsThreshold and enable Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096 if (( #ifdef DOT11_N_SUPPORT (pAd->CommonCfg.BACapability.field.AmsduEnable) || diff --git a/drivers/staging/rt3070/common/mlme.c b/drivers/staging/rt3070/common/mlme.c index 0ffbfa36699..0189bab013c 100644 --- a/drivers/staging/rt3070/common/mlme.c +++ b/drivers/staging/rt3070/common/mlme.c @@ -5575,7 +5575,7 @@ VOID AsicUpdateProtect( RTMP_IO_READ32(pAd, TX_RTS_CFG, &MacReg); MacReg &= 0xFF0000FF; - // If the user want disable RtsThreshold and enbale Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096 + // If the user want disable RtsThreshold and enable Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096 if (( #ifdef DOT11_N_SUPPORT (pAd->CommonCfg.BACapability.field.AmsduEnable) || diff --git a/drivers/watchdog/iop_wdt.c b/drivers/watchdog/iop_wdt.c index 96eb2cbe587..0c905967669 100644 --- a/drivers/watchdog/iop_wdt.c +++ b/drivers/watchdog/iop_wdt.c @@ -192,7 +192,7 @@ static int iop_wdt_release(struct inode *inode, struct file *file) if (test_bit(WDT_ENABLED, &wdt_status)) state = wdt_disable(); - /* if the timer is not disbaled reload and notify that we are still + /* if the timer is not disabled reload and notify that we are still * going down */ if (state != 0) { diff --git a/sound/pci/aw2/aw2-saa7146.c b/sound/pci/aw2/aw2-saa7146.c index 6a3891ab69d..296123ab74f 100644 --- a/sound/pci/aw2/aw2-saa7146.c +++ b/sound/pci/aw2/aw2-saa7146.c @@ -108,7 +108,7 @@ void snd_aw2_saa7146_setup(struct snd_aw2_saa7146 *chip, #endif /* WS0_CTRL, WS0_SYNC: input TSL1, I2S */ - /* At initialization WS1 and WS2 are disbaled (configured as input */ + /* At initialization WS1 and WS2 are disabled (configured as input) */ acon1 |= 0 * WS1_CTRL; acon1 |= 0 * WS2_CTRL; -- cgit v1.2.3 From 6d60f9dfc8d437e914d46fa355c50c695cad24e7 Mon Sep 17 00:00:00 2001 From: Martin Olsson Date: Tue, 7 Apr 2009 10:30:24 +0200 Subject: trivial: Fix paramater/parameter typo in dmesg and source comments Signed-off-by: Martin Olsson Signed-off-by: Jiri Kosina --- drivers/net/wireless/rndis_wlan.c | 2 +- net/netfilter/nf_conntrack_acct.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index bebf735cd4b..ff0042ffe3e 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -584,7 +584,7 @@ static int rndis_set_config_parameter(struct usbnet *dev, char *param, ret = rndis_set_oid(dev, OID_GEN_RNDIS_CONFIG_PARAMETER, infobuf, info_len); if (ret != 0) - devdbg(dev, "setting rndis config paramater failed, %d.", ret); + devdbg(dev, "setting rndis config parameter failed, %d.", ret); kfree(infobuf); return ret; diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c index 9fe8982bd7c..4a1d94aac20 100644 --- a/net/netfilter/nf_conntrack_acct.c +++ b/net/netfilter/nf_conntrack_acct.c @@ -116,7 +116,7 @@ int nf_conntrack_acct_init(struct net *net) if (net_eq(net, &init_net)) { #ifdef CONFIG_NF_CT_ACCT printk(KERN_WARNING "CONFIG_NF_CT_ACCT is deprecated and will be removed soon. 
Please use\n"); - printk(KERN_WARNING "nf_conntrack.acct=1 kernel paramater, acct=1 nf_conntrack module option or\n"); + printk(KERN_WARNING "nf_conntrack.acct=1 kernel parameter, acct=1 nf_conntrack module option or\n"); printk(KERN_WARNING "sysctl net.netfilter.nf_conntrack_acct=1 to enable it.\n"); #endif -- cgit v1.2.3 From 98a1708de1bfa5fe1c490febba850d6043d3c7fa Mon Sep 17 00:00:00 2001 From: Martin Olsson Date: Wed, 22 Apr 2009 18:21:29 +0200 Subject: trivial: fix typos s/paramter/parameter/ and s/excute/execute/ in documentation and source comments. Signed-off-by: Martin Olsson Signed-off-by: Jiri Kosina --- Documentation/powerpc/dts-bindings/fsl/board.txt | 2 +- Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt | 2 +- arch/s390/kernel/head.S | 2 +- drivers/ata/libata-acpi.c | 4 ++-- drivers/ata/libata-eh.c | 2 +- drivers/edac/e752x_edac.c | 2 +- drivers/isdn/divert/isdn_divert.c | 2 +- drivers/net/appletalk/ltpc.c | 2 +- drivers/net/e1000e/e1000.h | 2 +- drivers/net/ehea/ehea.h | 2 +- drivers/net/igbvf/igbvf.h | 2 +- drivers/net/mlx4/en_netdev.c | 2 +- drivers/net/qlge/qlge_mpi.c | 6 +++--- drivers/net/skfp/h/smt.h | 2 +- drivers/net/tokenring/3c359.c | 2 +- drivers/net/tokenring/lanstreamer.c | 2 +- drivers/net/tokenring/olympic.c | 2 +- drivers/net/ucc_geth_ethtool.c | 2 +- drivers/net/wireless/rt2x00/rt2x00lib.h | 2 +- drivers/net/wireless/wavelan_cs.c | 2 +- drivers/scsi/lpfc/lpfc_scsi.c | 4 ++-- sound/pci/sis7019.h | 4 ++-- 22 files changed, 27 insertions(+), 27 deletions(-) (limited to 'drivers/net') diff --git a/Documentation/powerpc/dts-bindings/fsl/board.txt b/Documentation/powerpc/dts-bindings/fsl/board.txt index 6c974d28eeb..e8b5bc24d0a 100644 --- a/Documentation/powerpc/dts-bindings/fsl/board.txt +++ b/Documentation/powerpc/dts-bindings/fsl/board.txt @@ -38,7 +38,7 @@ Required properities: - reg : Should contain the address and the length of the GPIO bank register. - #gpio-cells : Should be two. The first cell is the pin number and the - second cell is used to specify optional paramters (currently unused). + second cell is used to specify optional parameters (currently unused). - gpio-controller : Marks the port as GPIO controller. Example: diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt b/Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt index 1815dfede1b..349f79fd707 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt +++ b/Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt @@ -11,7 +11,7 @@ Required properties: "fsl,cpm1-pario-bank-c", "fsl,cpm1-pario-bank-d", "fsl,cpm1-pario-bank-e", "fsl,cpm2-pario-bank" - #gpio-cells : Should be two. The first cell is the pin number and the - second cell is used to specify optional paramters (currently unused). + second cell is used to specify optional parameters (currently unused). - gpio-controller : Marks the port as GPIO controller. 
Example of three SOC GPIO banks defined as gpio-controller nodes: diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 22596d70fc2..2c54e970d0d 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -64,7 +64,7 @@ __HEAD .org 0x100 # # subroutine for loading from tape -# Paramters: +# Parameters: # R1 = device number # R2 = load address .Lloader: diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index 6273d98d00e..ac176da1f94 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -748,9 +748,9 @@ static int ata_acpi_run_tf(struct ata_device *dev, /** * ata_acpi_exec_tfs - get then write drive taskfile settings * @dev: target ATA device - * @nr_executed: out paramter for the number of executed commands + * @nr_executed: out parameter for the number of executed commands * - * Evaluate _GTF and excute returned taskfiles. + * Evaluate _GTF and execute returned taskfiles. * * LOCKING: * EH context. diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 94919ad03df..fa22f94ca41 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2864,7 +2864,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link, /** * ata_set_mode - Program timings and issue SET FEATURES - XFER * @link: link on which timings will be programmed - * @r_failed_dev: out paramter for failed device + * @r_failed_dev: out parameter for failed device * * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If * ata_set_mode() fails, pointer to the failing device is diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c index facfdb1fa71..d205d493a68 100644 --- a/drivers/edac/e752x_edac.c +++ b/drivers/edac/e752x_edac.c @@ -1084,7 +1084,7 @@ static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt) struct pci_dev *dev = pvt->dev_d0f1; int enable = 1; - /* Allow module paramter override, else see if CPU supports parity */ + /* Allow module parameter override, else see if CPU supports parity */ if (sysbus_parity != -1) { enable = sysbus_parity; } else if (cpu_id[0] && diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c index 7d97d54588d..77e9fdda059 100644 --- a/drivers/isdn/divert/isdn_divert.c +++ b/drivers/isdn/divert/isdn_divert.c @@ -183,7 +183,7 @@ int cf_command(int drvid, int mode, (mode != 1) ? "" : " 0 ", (mode != 1) ? 
"" : fwd_nr); - retval = divert_if.ll_cmd(&cs->ics); /* excute command */ + retval = divert_if.ll_cmd(&cs->ics); /* execute command */ if (!retval) { cs->prev = NULL; diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index 78cc7146913..b642647170b 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c @@ -1220,7 +1220,7 @@ static int __init ltpc_setup(char *str) if (ints[0] > 2) { dma = ints[3]; } - /* ignore any other paramters */ + /* ignore any other parameters */ } return 1; } diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index f37360aa12a..44f0bf23daf 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -62,7 +62,7 @@ struct e1000_info; e_printk(KERN_NOTICE, adapter, format, ## arg) -/* Interrupt modes, as used by the IntMode paramter */ +/* Interrupt modes, as used by the IntMode parameter */ #define E1000E_INT_MODE_LEGACY 0 #define E1000E_INT_MODE_MSI 1 #define E1000E_INT_MODE_MSIX 2 diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 16a41389575..78952f8324e 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -268,7 +268,7 @@ struct ehea_qp_init_attr { }; /* - * Event Queue attributes, passed as paramter + * Event Queue attributes, passed as parameter */ struct ehea_eq_attr { u32 type; diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h index 4bff35e4687..d488733893a 100644 --- a/drivers/net/igbvf/igbvf.h +++ b/drivers/net/igbvf/igbvf.h @@ -45,7 +45,7 @@ struct igbvf_adapter; /* Interrupt defines */ #define IGBVF_START_ITR 648 /* ~6000 ints/sec */ -/* Interrupt modes, as used by the IntMode paramter */ +/* Interrupt modes, as used by the IntMode parameter */ #define IGBVF_INT_MODE_LEGACY 0 #define IGBVF_INT_MODE_MSI 1 #define IGBVF_INT_MODE_MSIX 2 diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index 7bcc49de163..e8eeef0c9c9 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c @@ -371,7 +371,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) int i; /* If we haven't received a specific coalescing setting - * (module param), we set the moderation paramters as follows: + * (module param), we set the moderation parameters as follows: * - moder_cnt is set to the number of mtu sized packets to * satisfy our coelsing target. * - moder_time is set to a fixed value. diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c index 9f81b797f10..ac9493f6c1a 100644 --- a/drivers/net/qlge/qlge_mpi.c +++ b/drivers/net/qlge/qlge_mpi.c @@ -141,7 +141,7 @@ end: /* We are being asked by firmware to accept * a change to the port. This is only * a change to max frame sizes (Tx/Rx), pause - * paramters, or loopback mode. We wake up a worker + * parameters, or loopback mode. We wake up a worker * to handler processing this since a mailbox command * will need to be sent to ACK the request. */ @@ -371,7 +371,7 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp) /* We are being asked by firmware to accept * a change to the port. This is only * a change to max frame sizes (Tx/Rx), pause - * paramters, or loopback mode. + * parameters, or loopback mode. */ case AEN_IDC_REQ: status = ql_idc_req_aen(qdev); @@ -380,7 +380,7 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp) /* Process and inbound IDC event. * This will happen when we're trying to * change tx/rx max frame size, change pause - * paramters or loopback mode. 
+ * parameters or loopback mode. */ case AEN_IDC_CMPLT: case AEN_IDC_EXT: diff --git a/drivers/net/skfp/h/smt.h b/drivers/net/skfp/h/smt.h index 1ff589988d1..2976757a36f 100644 --- a/drivers/net/skfp/h/smt.h +++ b/drivers/net/skfp/h/smt.h @@ -413,7 +413,7 @@ struct smt_p_reason { #define SMT_RDF_SUCCESS 0x00000003 /* success (PMF) */ #define SMT_RDF_BADSET 0x00000004 /* bad set count (PMF) */ #define SMT_RDF_ILLEGAL 0x00000005 /* read only (PMF) */ -#define SMT_RDF_NOPARAM 0x6 /* paramter not supported (PMF) */ +#define SMT_RDF_NOPARAM 0x6 /* parameter not supported (PMF) */ #define SMT_RDF_RANGE 0x8 /* out of range */ #define SMT_RDF_AUTHOR 0x9 /* not autohorized */ #define SMT_RDF_LENGTH 0x0a /* length error */ diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c index 534c0f38483..0337b9d673f 100644 --- a/drivers/net/tokenring/3c359.c +++ b/drivers/net/tokenring/3c359.c @@ -79,7 +79,7 @@ MODULE_AUTHOR("Mike Phillips ") ; MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ; MODULE_FIRMWARE(FW_NAME); -/* Module paramters */ +/* Module parameters */ /* Ring Speed 0,4,16 * 0 = Autosense diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c index 2e70ee8f145..46a2cc92d97 100644 --- a/drivers/net/tokenring/lanstreamer.c +++ b/drivers/net/tokenring/lanstreamer.c @@ -169,7 +169,7 @@ static char *open_min_error[] = { "Monitor Contention failer for RPL", "FDX Protocol Error" }; -/* Module paramters */ +/* Module parameters */ /* Ring Speed 0,4,16 * 0 = Autosense diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c index d068a9d3688..2d819fc8558 100644 --- a/drivers/net/tokenring/olympic.c +++ b/drivers/net/tokenring/olympic.c @@ -132,7 +132,7 @@ static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Reserved", "Reserved", "No Monitor Detected for RPL", "Monitor Contention failer for RPL", "FDX Protocol Error"}; -/* Module paramters */ +/* Module parameters */ MODULE_AUTHOR("Mike Phillips ") ; MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ; diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c index 6fcb500257b..61fe80dda3e 100644 --- a/drivers/net/ucc_geth_ethtool.c +++ b/drivers/net/ucc_geth_ethtool.c @@ -7,7 +7,7 @@ * * Limitation: * Can only get/set setttings of the first queue. - * Need to re-open the interface manually after changing some paramters. + * Need to re-open the interface manually after changing some parameters. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h index a631613177d..d83e3794d34 100644 --- a/drivers/net/wireless/rt2x00/rt2x00lib.h +++ b/drivers/net/wireless/rt2x00/rt2x00lib.h @@ -235,7 +235,7 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna); * @rt2x00dev: Pointer to &struct rt2x00_dev. * * Initialize work structure and all link tuning related - * paramters. This will not start the link tuning process itself. + * parameters. This will not start the link tuning process itself. 
*/ void rt2x00link_register(struct rt2x00_dev *rt2x00dev); diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c index e55b33961ae..fa2821be44c 100644 --- a/drivers/net/wireless/wavelan_cs.c +++ b/drivers/net/wireless/wavelan_cs.c @@ -138,7 +138,7 @@ psa_read(struct net_device * dev, /*------------------------------------------------------------------*/ /* - * Write the Paramter Storage Area to the WaveLAN card's memory + * Write the Parameter Storage Area to the WaveLAN card's memory */ static void psa_write(struct net_device * dev, diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 8032c5adb6a..679adfff0bf 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -827,8 +827,8 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc) * @reftag: out: ref tag (reference tag) * * Description: - * Extract DIF paramters from the command if possible. Otherwise, - * use default paratmers. + * Extract DIF parameters from the command if possible. Otherwise, + * use default parameters. * **/ static inline void diff --git a/sound/pci/sis7019.h b/sound/pci/sis7019.h index 013b6739a74..bc8c7681940 100644 --- a/sound/pci/sis7019.h +++ b/sound/pci/sis7019.h @@ -203,7 +203,7 @@ #define SIS_WEISR_B 0xac -/* Playback DMA parameters (paramter RAM) */ +/* Playback DMA parameters (parameter RAM) */ #define SIS_PLAY_DMA_OFFSET 0x0000 #define SIS_PLAY_DMA_SIZE 0x10 #define SIS_PLAY_DMA_ADDR(addr, num) \ @@ -228,7 +228,7 @@ #define SIS_PLAY_DMA_SSO_MASK 0xffff0000 #define SIS_PLAY_DMA_ESO_MASK 0x0000ffff -/* Capture DMA parameters (paramter RAM) */ +/* Capture DMA parameters (parameter RAM) */ #define SIS_CAPTURE_DMA_OFFSET 0x0800 #define SIS_CAPTURE_DMA_SIZE 0x10 #define SIS_CAPTURE_DMA_ADDR(addr, num) \ -- cgit v1.2.3 From 19af5cdb7c79ff5ec96a99893ffb7f894f4a3dc1 Mon Sep 17 00:00:00 2001 From: Martin Olsson Date: Thu, 23 Apr 2009 11:37:37 +0200 Subject: trivial: fix typo milisecond/millisecond for documentation and source comments. Signed-off-by: Martin Olsson Signed-off-by: Jiri Kosina --- Documentation/CodingStyle | 4 ++-- drivers/ide/ide-atapi.c | 2 +- drivers/isdn/mISDN/dsp_core.c | 2 +- drivers/net/ipg.h | 2 +- drivers/s390/scsi/zfcp_fc.c | 2 +- drivers/scsi/dpt/osd_util.h | 2 +- drivers/usb/serial/io_ti.c | 2 +- sound/pci/vx222/vx222_ops.c | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers/net') diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle index 72968cd5eaf..8bb37237ebd 100644 --- a/Documentation/CodingStyle +++ b/Documentation/CodingStyle @@ -698,8 +698,8 @@ very often is not. Abundant use of the inline keyword leads to a much bigger kernel, which in turn slows the system as a whole down, due to a bigger icache footprint for the CPU and simply because there is less memory available for the pagecache. Just think about it; a pagecache miss causes a -disk seek, which easily takes 5 miliseconds. There are a LOT of cpu cycles -that can go into these 5 miliseconds. +disk seek, which easily takes 5 milliseconds. There are a LOT of cpu cycles +that can go into these 5 milliseconds. A reasonable rule of thumb is to not put inline at functions that have more than 3 lines of code in them. 
An exception to this rule are the cases where diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index 757e5956b13..ae1cae38a07 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c @@ -577,7 +577,7 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive) /* * If necessary schedule the packet transfer to occur 'timeout' - * miliseconds later in ide_delayed_transfer_pc() after the + * milliseconds later in ide_delayed_transfer_pc() after the * device says it's ready for a packet. */ if (drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) { diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index 3083338716b..47dbfe298b4 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c @@ -502,7 +502,7 @@ tone_off: break; } dsp->cmx_delay = (*((int *)data)) << 3; - /* miliseconds to samples */ + /* milliseconds to samples */ if (dsp->cmx_delay >= (CMX_BUFF_HALF>>1)) /* clip to half of maximum usable buffer (half of half buffer) */ diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h index dd9318f1949..dfc2541bb55 100644 --- a/drivers/net/ipg.h +++ b/drivers/net/ipg.h @@ -514,7 +514,7 @@ enum ipg_regs { #define IPG_DMALIST_ALIGN_PAD 0x07 #define IPG_MULTICAST_HASHTABLE_SIZE 0x40 -/* Number of miliseconds to wait after issuing a software reset. +/* Number of milliseconds to wait after issuing a software reset. * 0x05 <= IPG_AC_RESETWAIT to account for proper 10Mbps operation. */ #define IPG_AC_RESETWAIT 0x05 diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 19ae0842047..18fd975412d 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -116,7 +116,7 @@ static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port) { if (atomic_dec_return(&wka_port->refcount) != 0) return; - /* wait 10 miliseconds, other reqs might pop in */ + /* wait 10 milliseconds, other reqs might pop in */ schedule_delayed_work(&wka_port->work, HZ / 100); } diff --git a/drivers/scsi/dpt/osd_util.h b/drivers/scsi/dpt/osd_util.h index 4b56c0436ba..b2613c2eaac 100644 --- a/drivers/scsi/dpt/osd_util.h +++ b/drivers/scsi/dpt/osd_util.h @@ -342,7 +342,7 @@ uLONG osdGetThreadID(void); /* wakes up the specifed thread */ void osdWakeThread(uLONG); -/* osd sleep for x miliseconds */ +/* osd sleep for x milliseconds */ void osdSleep(uLONG); #define DPT_THREAD_PRIORITY_LOWEST 0x00 diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index eabf20eeb37..db964db42d3 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -102,7 +102,7 @@ struct edgeport_port { __u8 shadow_mcr; __u8 shadow_lsr; __u8 lsr_mask; - __u32 ump_read_timeout; /* Number of miliseconds the UMP will + __u32 ump_read_timeout; /* Number of milliseconds the UMP will wait without data before completing a read short */ int baud_rate; diff --git a/sound/pci/vx222/vx222_ops.c b/sound/pci/vx222/vx222_ops.c index c0efe449111..6416d3f0c7b 100644 --- a/sound/pci/vx222/vx222_ops.c +++ b/sound/pci/vx222/vx222_ops.c @@ -367,7 +367,7 @@ static int vx2_load_xilinx_binary(struct vx_core *chip, const struct firmware *x unsigned int port; const unsigned char *image; - /* XILINX reset (wait at least 1 milisecond between reset on and off). */ + /* XILINX reset (wait at least 1 millisecond between reset on and off). 
*/ vx_outl(chip, CNTRL, VX_CNTRL_REGISTER_VALUE | VX_XILINX_RESET_MASK); vx_inl(chip, CNTRL); msleep(10); -- cgit v1.2.3 From 4737f0978d6e64eae468e01fa181abf6499e6b84 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Fri, 5 Jun 2009 00:44:53 +0200 Subject: trivial: Kconfig: .ko is normally not included in module names .ko is normally not included in Kconfig help, make it consistent. Signed-off-by: Pavel Machek Signed-off-by: Jiri Kosina --- arch/blackfin/Kconfig | 2 +- drivers/block/Kconfig | 2 +- drivers/char/Kconfig | 2 +- drivers/connector/Kconfig | 2 +- drivers/crypto/Kconfig | 6 +++--- drivers/ide/Kconfig | 2 +- drivers/media/dvb/dvb-usb/Kconfig | 2 +- drivers/mtd/devices/Kconfig | 2 +- drivers/mtd/nand/Kconfig | 4 ++-- drivers/net/Kconfig | 4 ++-- drivers/net/wireless/Kconfig | 2 +- drivers/net/wireless/hostap/Kconfig | 8 ++++---- drivers/net/wireless/iwlwifi/Kconfig | 4 ++-- drivers/net/wireless/rt2x00/Kconfig | 14 +++++++------- drivers/s390/net/Kconfig | 14 +++++++------- drivers/serial/Kconfig | 2 +- drivers/w1/Kconfig | 2 +- drivers/w1/masters/Kconfig | 6 +++--- drivers/watchdog/Kconfig | 2 +- net/ipv6/Kconfig | 2 +- net/netfilter/Kconfig | 4 ++-- 21 files changed, 44 insertions(+), 44 deletions(-) (limited to 'drivers/net') diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index 3640cdc38aa..bc9ce89f620 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -872,7 +872,7 @@ config BFIN_GPTIMERS are unsure, say N. To compile this driver as a module, choose M here: the module - will be called gptimers.ko. + will be called gptimers. choice prompt "Uncached DMA region" diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index f42fa50d355..1b98cc52c22 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -112,7 +112,7 @@ config GDROM with up to 1 GB of data. This drive will also read standard CD ROM disks. Select this option to access any disks in your GD ROM drive. Most users will want to say "Y" here. - You can also build this as a module which will be called gdrom.ko + You can also build this as a module which will be called gdrom. source "drivers/block/paride/Kconfig" diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 02ecfd5fa61..067e9dcb95c 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -692,7 +692,7 @@ config HVCS this driver. To compile this driver as a module, choose M here: the - module will be called hvcs.ko. Additionally, this module + module will be called hvcs. Additionally, this module will depend on arch specific APIs exported from hvcserver.ko which will also be compiled when this driver is built as a module. diff --git a/drivers/connector/Kconfig b/drivers/connector/Kconfig index 100bfd42206..6e6730f9dfd 100644 --- a/drivers/connector/Kconfig +++ b/drivers/connector/Kconfig @@ -7,7 +7,7 @@ menuconfig CONNECTOR of the netlink socket protocol. Connector support can also be built as a module. If so, the module - will be called cn.ko. + will be called cn. if CONNECTOR diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 01afd758072..3e72a6a96d7 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -34,7 +34,7 @@ config CRYPTO_DEV_PADLOCK_AES Available in VIA C3 and newer CPUs. If unsure say M. The compiled module will be - called padlock-aes.ko + called padlock-aes. config CRYPTO_DEV_PADLOCK_SHA tristate "PadLock driver for SHA1 and SHA256 algorithms" @@ -47,7 +47,7 @@ config CRYPTO_DEV_PADLOCK_SHA Available in VIA C7 and newer processors. If unsure say M. 
The compiled module will be - called padlock-sha.ko + called padlock-sha. config CRYPTO_DEV_GEODE tristate "Support for the Geode LX AES engine" @@ -79,7 +79,7 @@ config ZCRYPT_MONOLITHIC bool "Monolithic zcrypt module" depends on ZCRYPT="m" help - Select this option if you want to have a single module z90crypt.ko + Select this option if you want to have a single module z90crypt, that contains all parts of the crypto device driver (ap bus, request router and all the card drivers). diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index cf06494bb74..9a5d0aaac9d 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig @@ -46,7 +46,7 @@ menuconfig IDE SMART parameters from disk drives. To compile this driver as a module, choose M here: the - module will be called ide-core.ko. + module will be called ide-core. For further information, please read . diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig index 60955a70d88..1bb66e1ed5a 100644 --- a/drivers/media/dvb/dvb-usb/Kconfig +++ b/drivers/media/dvb/dvb-usb/Kconfig @@ -216,7 +216,7 @@ config DVB_USB_TTUSB2 help Say Y here to support the Pinnacle 400e DVB-S USB2.0 receiver. The firmware protocol used by this module is similar to the one used by the - old ttusb-driver - that's why the module is called dvb-usb-ttusb2.ko. + old ttusb-driver - that's why the module is called dvb-usb-ttusb2. config DVB_USB_DTT200U tristate "WideView WT-200U and WT-220U (pen) DVB-T USB2.0 support (Yakumo/Hama/Typhoon/Yuan)" diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 6fde0a2e356..325fab92a62 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -49,7 +49,7 @@ config MTD_MS02NV If you want to compile this driver as a module ( = code which can be inserted in and removed from the running kernel whenever you want), say M here and read . - The module will be called ms02-nv.ko. + The module will be called ms02-nv. config MTD_DATAFLASH tristate "Support for AT45xxx DataFlash" diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 890936d0275..f3276897859 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -260,7 +260,7 @@ config MTD_NAND_BASLER_EXCITE help This enables the driver for the NAND flash device found on the Basler eXcite Smart Camera. If built as a module, the driver - will be named "excite_nandflash.ko". + will be named excite_nandflash. config MTD_NAND_CAFE tristate "NAND support for OLPC CAFÉ chip" @@ -282,7 +282,7 @@ config MTD_NAND_CS553X controller is enabled for NAND, and currently requires that the controller be in MMIO mode. - If you say "m", the module will be called "cs553x_nand.ko". + If you say "m", the module will be called cs553x_nand. config MTD_NAND_ATMEL tristate "Support for NAND Flash / SmartMedia on AT91 and AVR32" diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 214a92d1ef7..b4683ce5564 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1880,7 +1880,7 @@ config FEC_MPC52xx ---help--- This option enables support for the MPC5200's on-chip Fast Ethernet Controller - If compiled as module, it will be called 'fec_mpc52xx.ko'. + If compiled as module, it will be called fec_mpc52xx. config FEC_MPC52xx_MDIO bool "MPC52xx FEC MDIO bus driver" @@ -1892,7 +1892,7 @@ config FEC_MPC52xx_MDIO (Motorola? industry standard). If your board uses an external PHY connected to FEC, enable this. If not sure, enable. - If compiled as module, it will be called 'fec_mpc52xx_phy.ko'. 
+ If compiled as module, it will be called fec_mpc52xx_phy. config NE_H8300 tristate "NE2000 compatible support for H8/300" diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 3d94e7dfea6..3359497012a 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -310,7 +310,7 @@ config PRISM54 If you want to compile the driver as a module ( = code which can be inserted in and removed from the running kernel whenever you want), say M here and read . - The module will be called prism54.ko. + The module will be called prism54. config USB_ZD1201 tristate "USB ZD1201 based Wireless device support" diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig index 932d207bce2..c15db229351 100644 --- a/drivers/net/wireless/hostap/Kconfig +++ b/drivers/net/wireless/hostap/Kconfig @@ -29,7 +29,7 @@ config HOSTAP PLX/PCI/CS version of the driver to actually use the driver. The driver can be compiled as a module and it will be called - "hostap.ko". + hostap. config HOSTAP_FIRMWARE bool "Support downloading firmware images with Host AP driver" @@ -68,7 +68,7 @@ config HOSTAP_PLX driver. The driver can be compiled as a module and will be named - "hostap_plx.ko". + hostap_plx. config HOSTAP_PCI tristate "Host AP driver for Prism2.5 PCI adaptors" @@ -81,7 +81,7 @@ config HOSTAP_PCI driver. The driver can be compiled as a module and will be named - "hostap_pci.ko". + hostap_pci. config HOSTAP_CS tristate "Host AP driver for Prism2/2.5/3 PC Cards" @@ -94,4 +94,4 @@ config HOSTAP_CS driver. The driver can be compiled as a module and will be named - "hostap_cs.ko". + hostap_cs. diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index 8304f6406a1..736162324ba 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -75,7 +75,7 @@ config IWLAGN If you want to compile the driver as a module ( = code which can be inserted in and removed from the running kernel whenever you want), say M here and read . The - module will be called iwlagn.ko. + module will be called iwlagn. config IWL4965 @@ -113,7 +113,7 @@ config IWL3945 If you want to compile the driver as a module ( = code which can be inserted in and removed from the running kernel whenever you want), say M here and read . The - module will be called iwl3945.ko. + module will be called iwl3945. config IWL3945_SPECTRUM_MEASUREMENT bool "Enable Spectrum Measurement in iwl3945 driver" diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index bfc5d9cf716..1ae11c7f17a 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig @@ -9,11 +9,11 @@ menuconfig RT2X00 When building one of the individual drivers, the rt2x00 library will also be created. That library (when the driver is built as - a module) will be called "rt2x00lib.ko". + a module) will be called rt2x00lib. Additionally PCI and USB libraries will also be build depending on the types of drivers being selected, these libraries will be - called "rt2x00pci.ko" and "rt2x00usb.ko". + called rt2x00pci and rt2x00usb. if RT2X00 @@ -26,7 +26,7 @@ config RT2400PCI This adds support for rt2400 wireless chipset family. Supported chips: RT2460. - When compiled as a module, this driver will be called "rt2400pci.ko". + When compiled as a module, this driver will be called rt2400pci. 
config RT2500PCI tristate "Ralink rt2500 (PCI/PCMCIA) support" @@ -37,7 +37,7 @@ config RT2500PCI This adds support for rt2500 wireless chipset family. Supported chips: RT2560. - When compiled as a module, this driver will be called "rt2500pci.ko". + When compiled as a module, this driver will be called rt2500pci. config RT61PCI tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support" @@ -51,7 +51,7 @@ config RT61PCI This adds support for rt2501 wireless chipset family. Supported chips: RT2561, RT2561S & RT2661. - When compiled as a module, this driver will be called "rt61pci.ko". + When compiled as a module, this driver will be called rt61pci. config RT2500USB tristate "Ralink rt2500 (USB) support" @@ -62,7 +62,7 @@ config RT2500USB This adds support for rt2500 wireless chipset family. Supported chips: RT2571 & RT2572. - When compiled as a module, this driver will be called "rt2500usb.ko". + When compiled as a module, this driver will be called rt2500usb. config RT73USB tristate "Ralink rt2501/rt73 (USB) support" @@ -75,7 +75,7 @@ config RT73USB This adds support for rt2501 wireless chipset family. Supported chips: RT2571W, RT2573 & RT2671. - When compiled as a module, this driver will be called "rt73usb.ko". + When compiled as a module, this driver will be called rt73usb. config RT2X00_LIB_PCI tristate diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index a7745c82b4a..cb909a5b504 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -8,7 +8,7 @@ config LCS Select this option if you want to use LCS networking on IBM System z. This device driver supports Token Ring (IEEE 802.5), FDDI (IEEE 802.7) and Ethernet. - To compile as a module, choose M. The module name is lcs.ko. + To compile as a module, choose M. The module name is lcs. If you do not know what it is, it's safe to choose Y. config CTCM @@ -21,7 +21,7 @@ config CTCM It also supports virtual CTCs when running under VM. This driver also supports channel-to-channel MPC SNA devices. MPC is an SNA protocol device used by Communication Server for Linux. - To compile as a module, choose M. The module name is ctcm.ko. + To compile as a module, choose M. The module name is ctcm. To compile into the kernel, choose Y. If you do not need any channel-to-channel connection, choose N. @@ -34,7 +34,7 @@ config NETIUCV link between VM guests. Using ifconfig a point-to-point connection can be established to the Linux on IBM System z running on the other VM guest. To compile as a module, choose M. - The module name is netiucv.ko. If unsure, choose Y. + The module name is netiucv. If unsure, choose Y. config SMSGIUCV tristate "IUCV special message support (VM only)" @@ -50,7 +50,7 @@ config CLAW This driver supports channel attached CLAW devices. CLAW is Common Link Access for Workstation. Common devices that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices. - To compile as a module, choose M. The module name is claw.ko. + To compile as a module, choose M. The module name is claw. To compile into the kernel, choose Y. config QETH @@ -65,14 +65,14 @@ config QETH To compile this driver as a module, choose M. - The module name is qeth.ko. + The module name is qeth. config QETH_L2 tristate "qeth layer 2 device support" depends on QETH help Select this option to be able to run qeth devices in layer 2 mode. - To compile as a module, choose M. The module name is qeth_l2.ko. + To compile as a module, choose M. The module name is qeth_l2. If unsure, choose y. 
config QETH_L3 @@ -80,7 +80,7 @@ config QETH_L3 depends on QETH help Select this option to be able to run qeth devices in layer 3 mode. - To compile as a module choose M. The module name is qeth_l3.ko. + To compile as a module choose M. The module name is qeth_l3. If unsure, choose Y. config QETH_IPV6 diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 641e800ed69..1132c5cae7a 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig @@ -861,7 +861,7 @@ config SERIAL_UARTLITE Say Y here if you want to use the Xilinx uartlite serial controller. To compile this driver as a module, choose M here: the - module will be called uartlite.ko. + module will be called uartlite. config SERIAL_UARTLITE_CONSOLE bool "Support for console on Xilinx uartlite serial port" diff --git a/drivers/w1/Kconfig b/drivers/w1/Kconfig index 9adbb4f9047..fd2c7bd9dfb 100644 --- a/drivers/w1/Kconfig +++ b/drivers/w1/Kconfig @@ -8,7 +8,7 @@ menuconfig W1 If you want W1 support, you should say Y here. This W1 support can also be built as a module. If so, the module - will be called wire.ko. + will be called wire. if W1 diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig index 96d2f8e4c27..3195fb8b7d9 100644 --- a/drivers/w1/masters/Kconfig +++ b/drivers/w1/masters/Kconfig @@ -12,7 +12,7 @@ config W1_MASTER_MATROX using Matrox's G400 GPIO pins. This support is also available as a module. If so, the module - will be called matrox_w1.ko. + will be called matrox_w1. config W1_MASTER_DS2490 tristate "DS2490 USB <-> W1 transport layer for 1-wire" @@ -22,7 +22,7 @@ config W1_MASTER_DS2490 for example DS9490*. This support is also available as a module. If so, the module - will be called ds2490.ko. + will be called ds2490. config W1_MASTER_DS2482 tristate "Maxim DS2482 I2C to 1-Wire bridge" @@ -56,7 +56,7 @@ config W1_MASTER_GPIO GPIO pins. This driver uses the GPIO API to control the wire. This support is also available as a module. If so, the module - will be called w1-gpio.ko. + will be called w1-gpio. config HDQ_MASTER_OMAP tristate "OMAP HDQ driver" diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 5eb8f21da82..452082f8794 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -531,7 +531,7 @@ config SBC8360_WDT Board Computer produced by Axiomtek Co., Ltd. (www.axiomtek.com). To compile this driver as a module, choose M here: the - module will be called sbc8360.ko. + module will be called sbc8360. Most people will say N. diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index ca8cb326d1d..ead6c7a42f4 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -168,7 +168,7 @@ config IPV6_SIT into IPv4 packets. This is useful if you want to connect two IPv6 networks over an IPv4-only path. - Saying M here will produce a module called sit.ko. If unsure, say Y. + Saying M here will produce a module called sit. If unsure, say Y. config IPV6_NDISC_NODETYPE bool diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index cb3ad741ebf..c26a20c58dd 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -327,7 +327,7 @@ config NETFILTER_XT_TARGET_CONNMARK If you want to compile it as a module, say M here and read . The module will be called - ipt_CONNMARK.ko. If unsure, say `N'. + ipt_CONNMARK. If unsure, say `N'. config NETFILTER_XT_TARGET_CONNSECMARK tristate '"CONNSECMARK" target support' @@ -584,7 +584,7 @@ config NETFILTER_XT_MATCH_CONNMARK If you want to compile it as a module, say M here and read . 
The module will be called - ipt_connmark.ko. If unsure, say `N'. + ipt_connmark. If unsure, say `N'. config NETFILTER_XT_MATCH_CONNTRACK tristate '"conntrack" connection tracking match support' -- cgit v1.2.3 From 3ac49a1c9928b4a242b3cb1d83bc1d5c9b8fcb50 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Thu, 4 Jun 2009 16:20:28 +0200 Subject: trivial: fix ETIMEOUT -> ETIMEDOUT typos fix ETIMEOUT -> ETIMEDOUT typos Signed-off-by: Jean Delvare Signed-off-by: Jiri Kosina --- arch/arm/mach-sa1100/jornada720_ssp.c | 4 ++-- drivers/net/qlge/qlge_main.c | 2 +- drivers/net/usb/usbnet.c | 2 +- drivers/staging/wlan-ng/hfa384x_usb.c | 2 +- kernel/rtmutex.c | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/arch/arm/mach-sa1100/jornada720_ssp.c b/arch/arm/mach-sa1100/jornada720_ssp.c index 28cf3696797..506a5e5a9ad 100644 --- a/arch/arm/mach-sa1100/jornada720_ssp.c +++ b/arch/arm/mach-sa1100/jornada720_ssp.c @@ -54,7 +54,7 @@ EXPORT_SYMBOL(jornada_ssp_reverse); * timeout after rounds. Needs mcu running before its called. * * returns : %mcu output on success - * : %-ETIMEOUT on timeout + * : %-ETIMEDOUT on timeout */ int jornada_ssp_byte(u8 byte) { @@ -82,7 +82,7 @@ EXPORT_SYMBOL(jornada_ssp_byte); * jornada_ssp_inout - decide if input is command or trading byte * * returns : (jornada_ssp_byte(byte)) on success - * : %-ETIMEOUT on timeout failure + * : %-ETIMEDOUT on timeout failure */ int jornada_ssp_inout(u8 byte) { diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index c92ced24794..1fd5ecb2442 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -3174,7 +3174,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev) if (value & RST_FO_FR) { QPRINTK(qdev, IFDOWN, ERR, - "ETIMEOUT!!! errored out of resetting the chip!\n"); + "ETIMEDOUT!!! errored out of resetting the chip!\n"); status = -ETIMEDOUT; } diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index f3a2fce6166..47f68cfa7e2 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -398,7 +398,7 @@ static void rx_complete (struct urb *urb) /* stalls need manual reset. this is rare ... except that * when going through USB 2.0 TTs, unplug appears this way. - * we avoid the highspeed version of the ETIMEOUT/EILSEQ + * we avoid the highspeed version of the ETIMEDOUT/EILSEQ * storm, recovering as needed. */ case -EPIPE: diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index 888198c9a10..824e65bdc43 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -2424,7 +2424,7 @@ int hfa384x_drvr_ramdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len) * 0 success * >0 f/w reported error - f/w status code * <0 driver reported error -* -ETIMEOUT timout waiting for the cmd regs to become +* -ETIMEDOUT timout waiting for the cmd regs to become * available, or waiting for the control reg * to indicate the Aux port is enabled. * -ENODATA the buffer does NOT contain a valid PDA. 
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 820c5af44f3..fcd107a78c5 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -902,7 +902,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); * Returns: * 0 on success * -EINTR when interrupted by a signal - * -ETIMEOUT when the timeout expired + * -ETIMEDOUT when the timeout expired * -EDEADLK when the lock would deadlock (when deadlock detection is on) */ int -- cgit v1.2.3 From faea56c9bb44f539da1ae0194184873fc2720b20 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 12 Jun 2009 11:43:48 -0700 Subject: [SCSI] cnic: fix undefined reference to `ip6_route_output' Fix cnic build for case of CONFIG_INET=n. Fix cnic build for case of CONFIG_IPV6=m and CONFIG_CNIC=y. Fixes these build errors: cnic.c:(.text+0x236a1d): undefined reference to `ip_route_output_key' cnic.c:(.text+0x15a8e8): undefined reference to `ip6_route_output' Signed-off-by: Randy Dunlap Signed-off-by: James Bottomley --- drivers/net/cnic.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 8d740376bbd..a9e2fd35bb4 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c @@ -1454,6 +1454,7 @@ static inline u16 cnic_get_vlan(struct net_device *dev, static int cnic_get_v4_route(struct sockaddr_in *dst_addr, struct dst_entry **dst) { +#if defined(CONFIG_INET) struct flowi fl; int err; struct rtable *rt; @@ -1465,12 +1466,15 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr, if (!err) *dst = &rt->u.dst; return err; +#else + return -ENETUNREACH; +#endif } static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, struct dst_entry **dst) { -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) struct flowi fl; memset(&fl, 0, sizeof(fl)); @@ -1550,7 +1554,7 @@ static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr) clear_bit(SK_F_IPV6, &csk->flags); if (is_v6) { -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) set_bit(SK_F_IPV6, &csk->flags); err = cnic_get_v6_route(&saddr->remote.v6, &dst); if (err) -- cgit v1.2.3 From bc3bf8fd330ce981ce632a1a4a283eee46838f32 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 13 Jun 2009 08:29:33 +0200 Subject: [SCSI] cnic: fix error: implicit declaration of function ‘__symbol_get’ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/net/cnic.c: In function ‘init_bnx2_cnic’: drivers/net/cnic.c:2520: error: implicit declaration of function ‘__symbol_get’ drivers/net/cnic.c:2520: warning: assignment makes pointer from integer without a cast make[1]: *** [drivers/net/cnic.o] Error 1 make: *** [drivers/net/cnic.o] Error 2 Caused by not including linux/module.h Signed-off-by: Ingo Molnar Signed-off-by: James Bottomley --- drivers/net/cnic.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index a9e2fd35bb4..44f77eb1180 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c @@ -25,6 +25,8 @@ #include #include #include +#include + #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) #define BCM_VLAN 1 #endif -- cgit v1.2.3 From d1fdf24b4074a8d962f9a28519c99dcdd66bdee3 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Sun, 14 Jun 2009 13:30:45 -0700 Subject: mlx4_core: Don't double-free IRQs when falling back from MSI-X 
to INTx When both MSI-X and legacy INTx fail to generate an interrupt, the driver frees the MSI-X interrupts twice. Fix this by clearing the have_irq flag for the MSI-X interrupts when they are freed the first time. This is the same bug that was reported in ib_mthca by Yinghai Lu . Signed-off-by: Roland Dreier --- drivers/net/mlx4/eq.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index 8830dcb92ec..ce064e32420 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c @@ -497,8 +497,10 @@ static void mlx4_free_irqs(struct mlx4_dev *dev) if (eq_table->have_irq) free_irq(dev->pdev->irq, dev); for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) - if (eq_table->eq[i].have_irq) + if (eq_table->eq[i].have_irq) { free_irq(eq_table->eq[i].irq, eq_table->eq + i); + eq_table->eq[i].have_irq = 0; + } kfree(eq_table->irq_names); } -- cgit v1.2.3
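
The mlx4_eq change above is an instance of a common defensive idiom: when a resource such as an IRQ can be released from more than one cleanup path, clear the ownership flag at the moment of the first free so that any later pass over the same table becomes a no-op. The fragment below is only a minimal sketch of that idiom, not the mlx4 driver's actual code; the struct and function names (my_eq, my_free_eq_irqs) are invented for illustration, and only free_irq() is a real kernel API.

    #include <linux/interrupt.h>

    /* Illustrative only: mirrors the "clear have_irq when freeing" idiom
     * from the mlx4 fix above; types and names here are hypothetical. */
    struct my_eq {
            unsigned int irq;
            int have_irq;           /* nonzero while this IRQ is still requested */
    };

    static void my_free_eq_irqs(struct my_eq *eq, int num, void *dev_id)
    {
            int i;

            for (i = 0; i < num; i++) {
                    if (eq[i].have_irq) {
                            free_irq(eq[i].irq, dev_id);
                            eq[i].have_irq = 0;     /* a second call is now harmless */
                    }
            }
    }

Calling my_free_eq_irqs() twice on the same table is then safe, which is the property the patch restores for the case where the MSI-X fallback path runs the cleanup a second time.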