Diffstat (limited to 'drivers')
-rw-r--r--  drivers/atm/firestream.c | 6
-rw-r--r--  drivers/atm/iphase.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-xts.c | 17
-rw-r--r--  drivers/crypto/omap-sham.c | 2
-rw-r--r--  drivers/dma-buf/dma-buf.c | 7
-rw-r--r--  drivers/dma-buf/reservation.c | 72
-rw-r--r--  drivers/gpio/gpio-lpc32xx.c | 48
-rw-r--r--  drivers/gpio/gpiolib.c | 51
-rw-r--r--  drivers/net/ethernet/arc/emac_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/alx/alx.h | 4
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 48
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 17
-rw-r--r--  drivers/net/ethernet/ezchip/nps_enet.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 11
-rw-r--r--  drivers/net/ethernet/marvell/mvneta_bm.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 52
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 18
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 26
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 16
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 32
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 12
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 102
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 2
-rw-r--r--  drivers/net/team/team.c | 9
-rw-r--r--  drivers/net/usb/pegasus.c | 2
-rw-r--r--  drivers/net/usb/smsc95xx.c | 51
-rw-r--r--  drivers/net/virtio_net.c | 18
-rw-r--r--  drivers/net/vxlan.c | 3
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-common.c | 5
-rw-r--r--  drivers/pinctrl/nomadik/pinctrl-nomadik.c | 2
-rw-r--r--  drivers/ptp/ptp_chardev.c | 12
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 5
-rw-r--r--  drivers/scsi/aacraid/linit.c | 11
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 3
-rw-r--r--  drivers/scsi/scsi_lib.c | 7
-rw-r--r--  drivers/scsi/sd.c | 9
45 files changed, 497 insertions(+), 261 deletions(-)
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index a969a7e443be..85aaf2222587 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -181,13 +181,17 @@ static char *res_strings[] = {
"reserved 27",
"reserved 28",
"reserved 29",
- "reserved 30",
+ "reserved 30", /* FIXME: The strings between 30-40 might be wrong. */
"reassembly abort: no buffers",
"receive buffer overflow",
"change in GFC",
"receive buffer full",
"low priority discard - no receive descriptor",
"low priority discard - missing end of packet",
+ "reserved 37",
+ "reserved 38",
+ "reserved 39",
+ "reseverd 40",
"reserved 41",
"reserved 42",
"reserved 43",
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 7d00f2994738..809dd1e02091 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1128,7 +1128,7 @@ static int rx_pkt(struct atm_dev *dev)
/* make the ptr point to the corresponding buffer desc entry */
buf_desc_ptr += desc;
if (!desc || (desc > iadev->num_rx_desc) ||
- ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
+ ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
free_desc(dev, desc);
IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
return -1;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 52c7395cb8d8..0d0d4529ee36 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
unsigned int unit;
+ u32 unit_size;
int ret;
if (!ctx->u.aes.key_len)
@@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
if (!req->info)
return -EINVAL;
- for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
- if (!(req->nbytes & (unit_size_map[unit].size - 1)))
- break;
+ unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
+ if (req->nbytes <= unit_size_map[0].size) {
+ for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
+ if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
+ unit_size = unit_size_map[unit].value;
+ break;
+ }
+ }
+ }
- if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
+ if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
(ctx->u.aes.key_len != AES_KEYSIZE_128)) {
/* Use the fallback to process the request for any
* unsupported unit sizes or key sizes
@@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
: CCP_AES_ACTION_DECRYPT;
- rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
+ rctx->cmd.u.xts.unit_size = unit_size;
rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
rctx->cmd.u.xts.iv = &rctx->iv_sg;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 6eefaa2fe58f..63464e86f2b1 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1986,7 +1986,7 @@ err_algs:
&dd->pdata->algs_info[i].algs_list[j]);
err_pm:
pm_runtime_disable(dev);
- if (dd->polling_mode)
+ if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
data_err:
dev_err(dev, "initialization failed.\n");
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 4a2c07ee6677..6355ab38d630 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -33,6 +33,7 @@
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>
+#include <linux/mm.h>
#include <uapi/linux/dma-buf.h>
@@ -90,7 +91,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
dmabuf = file->private_data;
/* check for overflowing the buffer's size */
- if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+ if (vma->vm_pgoff + vma_pages(vma) >
dmabuf->size >> PAGE_SHIFT)
return -EINVAL;
@@ -723,11 +724,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
return -EINVAL;
/* check for offset overflow */
- if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
+ if (pgoff + vma_pages(vma) < pgoff)
return -EOVERFLOW;
/* check for overflowing the buffer's size */
- if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+ if (pgoff + vma_pages(vma) >
dmabuf->size >> PAGE_SHIFT)
return -EINVAL;
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index c0bd5722c997..9566a62ad8e3 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -35,6 +35,17 @@
#include <linux/reservation.h>
#include <linux/export.h>
+/**
+ * DOC: Reservation Object Overview
+ *
+ * The reservation object provides a mechanism to manage shared and
+ * exclusive fences associated with a buffer. A reservation object
+ * can have attached one exclusive fence (normally associated with
+ * write operations) or N shared fences (read operations). The RCU
+ * mechanism is used to protect read access to fences from locked
+ * write-side updates.
+ */
+
DEFINE_WW_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
@@ -43,9 +54,17 @@ EXPORT_SYMBOL(reservation_seqcount_class);
const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);
-/*
- * Reserve space to add a shared fence to a reservation_object,
- * must be called with obj->lock held.
+
+/**
+ * reservation_object_reserve_shared - Reserve space to add a shared
+ * fence to a reservation_object.
+ * @obj: reservation object
+ *
+ * Should be called before reservation_object_add_shared_fence(). Must
+ * be called with obj->lock held.
+ *
+ * RETURNS
+ * Zero for success, or -errno
*/
int reservation_object_reserve_shared(struct reservation_object *obj)
{
@@ -180,7 +199,11 @@ done:
fence_put(old_fence);
}
-/*
+/**
+ * reservation_object_add_shared_fence - Add a fence to a shared slot
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
* Add a fence to a shared slot, obj->lock must be held, and
* reservation_object_reserve_shared_fence has been called.
*/
@@ -200,6 +223,13 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);
+/**
+ * reservation_object_add_excl_fence - Add an exclusive fence.
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
+ * Add a fence to the exclusive slot. The obj->lock must be held.
+ */
void reservation_object_add_excl_fence(struct reservation_object *obj,
struct fence *fence)
{
@@ -233,6 +263,18 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
+/**
+ * reservation_object_get_fences_rcu - Get an object's shared and exclusive
+ * fences without update side lock held
+ * @obj: the reservation object
+ * @pfence_excl: the returned exclusive fence (or NULL)
+ * @pshared_count: the number of shared fences returned
+ * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * the required size, and must be freed by caller)
+ *
+ * RETURNS
+ * Zero or -errno
+ */
int reservation_object_get_fences_rcu(struct reservation_object *obj,
struct fence **pfence_excl,
unsigned *pshared_count,
@@ -319,6 +361,18 @@ unlock:
}
EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
+/**
+ * reservation_object_wait_timeout_rcu - Wait on reservation's objects
+ * shared and/or exclusive fences.
+ * @obj: the reservation object
+ * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @intr: if true, do interruptible wait
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * RETURNS
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
+ * greater than zer on success.
+ */
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
unsigned long timeout)
@@ -416,6 +470,16 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
return ret;
}
+/**
+ * reservation_object_test_signaled_rcu - Test if a reservation object's
+ * fences have been signaled.
+ * @obj: the reservation object
+ * @test_all: if true, test all fences, otherwise only test the exclusive
+ * fence
+ *
+ * RETURNS
+ * true if all fences signaled, else false
+ */
bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
bool test_all)
{
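
To illustrate the interface the kerneldoc above documents, here is a minimal usage sketch of attaching a shared (read) fence from a driver. It is not part of this patch; the obj and fence variables are assumed to come from the caller, and the ww_mutex is taken with a NULL acquire context for brevity:

	struct reservation_object *obj = /* buffer's reservation object */;
	struct fence *fence = /* fence created for the read operation */;
	int ret;

	ww_mutex_lock(&obj->lock, NULL);
	/* make room for one more shared slot before adding the fence */
	ret = reservation_object_reserve_shared(obj);
	if (!ret)
		reservation_object_add_shared_fence(obj, fence);
	ww_mutex_unlock(&obj->lock);

Readers on the other side would then use the *_rcu() helpers (get_fences, wait_timeout, test_signaled) without taking obj->lock.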
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index d39014daeef9..fc5f197906ac 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -29,7 +29,6 @@
#include <mach/hardware.h>
#include <mach/platform.h>
-#include <mach/irqs.h>
#define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000)
#define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004)
@@ -371,61 +370,16 @@ static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin)
static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset)
{
- return IRQ_LPC32XX_P0_P1_IRQ;
+ return -ENXIO;
}
-static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = {
- IRQ_LPC32XX_GPIO_00,
- IRQ_LPC32XX_GPIO_01,
- IRQ_LPC32XX_GPIO_02,
- IRQ_LPC32XX_GPIO_03,
- IRQ_LPC32XX_GPIO_04,
- IRQ_LPC32XX_GPIO_05,
-};
-
static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset)
{
- if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table))
- return lpc32xx_gpio_to_irq_gpio_p3_table[offset];
return -ENXIO;
}
-static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = {
- IRQ_LPC32XX_GPI_00,
- IRQ_LPC32XX_GPI_01,
- IRQ_LPC32XX_GPI_02,
- IRQ_LPC32XX_GPI_03,
- IRQ_LPC32XX_GPI_04,
- IRQ_LPC32XX_GPI_05,
- IRQ_LPC32XX_GPI_06,
- IRQ_LPC32XX_GPI_07,
- IRQ_LPC32XX_GPI_08,
- IRQ_LPC32XX_GPI_09,
- -ENXIO, /* 10 */
- -ENXIO, /* 11 */
- -ENXIO, /* 12 */
- -ENXIO, /* 13 */
- -ENXIO, /* 14 */
- -ENXIO, /* 15 */
- -ENXIO, /* 16 */
- -ENXIO, /* 17 */
- -ENXIO, /* 18 */
- IRQ_LPC32XX_GPI_19,
- -ENXIO, /* 20 */
- -ENXIO, /* 21 */
- -ENXIO, /* 22 */
- -ENXIO, /* 23 */
- -ENXIO, /* 24 */
- -ENXIO, /* 25 */
- -ENXIO, /* 26 */
- -ENXIO, /* 27 */
- IRQ_LPC32XX_GPI_28,
-};
-
static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset)
{
- if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table))
- return lpc32xx_gpio_to_irq_gpi_p3_table[offset];
return -ENXIO;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index d407f904a31c..24f60d28f0c0 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -20,6 +20,7 @@
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
+#include <linux/compat.h>
#include <uapi/linux/gpio.h>
#include "gpiolib.h"
@@ -316,7 +317,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct gpio_device *gdev = filp->private_data;
struct gpio_chip *chip = gdev->chip;
- int __user *ip = (int __user *)arg;
+ void __user *ip = (void __user *)arg;
/* We fail any subsequent ioctl():s when the chip is gone */
if (!chip)
@@ -388,6 +389,14 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -EINVAL;
}
+#ifdef CONFIG_COMPAT
+static long gpio_ioctl_compat(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return gpio_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
/**
* gpio_chrdev_open() - open the chardev for ioctl operations
* @inode: inode for this chardev
@@ -431,7 +440,9 @@ static const struct file_operations gpio_fileops = {
.owner = THIS_MODULE,
.llseek = noop_llseek,
.unlocked_ioctl = gpio_ioctl,
- .compat_ioctl = gpio_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = gpio_ioctl_compat,
+#endif
};
static void gpiodevice_release(struct device *dev)
@@ -618,6 +629,8 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
goto err_free_label;
}
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
for (i = 0; i < chip->ngpio; i++) {
struct gpio_desc *desc = &gdev->descs[i];
@@ -649,8 +662,6 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
}
}
- spin_unlock_irqrestore(&gpio_lock, flags);
-
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
@@ -1356,10 +1367,13 @@ done:
/*
* This descriptor validation needs to be inserted verbatim into each
* function taking a descriptor, so we need to use a preprocessor
- * macro to avoid endless duplication.
+ * macro to avoid endless duplication. If the desc is NULL it is an
+ * optional GPIO and calls should just bail out.
*/
#define VALIDATE_DESC(desc) do { \
- if (!desc || !desc->gdev) { \
+ if (!desc) \
+ return 0; \
+ if (!desc->gdev) { \
pr_warn("%s: invalid GPIO\n", __func__); \
return -EINVAL; \
} \
@@ -1370,7 +1384,9 @@ done:
} } while (0)
#define VALIDATE_DESC_VOID(desc) do { \
- if (!desc || !desc->gdev) { \
+ if (!desc) \
+ return; \
+ if (!desc->gdev) { \
pr_warn("%s: invalid GPIO\n", __func__); \
return; \
} \
@@ -2066,17 +2082,30 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq);
*/
int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
{
- if (offset >= chip->ngpio)
- return -EINVAL;
+ struct gpio_desc *desc;
+
+ desc = gpiochip_get_desc(chip, offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ /* Flush direction if something changed behind our back */
+ if (chip->get_direction) {
+ int dir = chip->get_direction(chip, offset);
+
+ if (dir)
+ clear_bit(FLAG_IS_OUT, &desc->flags);
+ else
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ }
- if (test_bit(FLAG_IS_OUT, &chip->gpiodev->descs[offset].flags)) {
+ if (test_bit(FLAG_IS_OUT, &desc->flags)) {
chip_err(chip,
"%s: tried to flag a GPIO set as output for IRQ\n",
__func__);
return -EIO;
}
- set_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
+ set_bit(FLAG_USED_AS_IRQ, &desc->flags);
return 0;
}
EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
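
With the VALIDATE_DESC change above, a NULL descriptor (an absent optional GPIO) becomes a silent no-op in the gpiod accessors instead of triggering the "invalid GPIO" warning. A consumer-side sketch, not part of this patch and using hypothetical device/line names:

	struct gpio_desc *reset;

	/* returns NULL (not an error) when the "reset" line is not described */
	reset = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value_cansleep(reset, 1);	/* simply returns if reset is NULL */
	gpiod_set_value_cansleep(reset, 0);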
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 16419f550eff..058460bdd5a6 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -141,7 +141,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
priv->bus = bus;
bus->priv = priv;
bus->parent = priv->dev;
- bus->name = "Synopsys MII Bus",
+ bus->name = "Synopsys MII Bus";
bus->read = &arc_mdio_read;
bus->write = &arc_mdio_write;
bus->reset = &arc_mdio_reset;
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index 8fc93c5f6abc..d02c4240b7df 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -96,6 +96,10 @@ struct alx_priv {
unsigned int rx_ringsz;
unsigned int rxbuf_size;
+ struct page *rx_page;
+ unsigned int rx_page_offset;
+ unsigned int rx_frag_size;
+
struct napi_struct napi;
struct alx_tx_queue txq;
struct alx_rx_queue rxq;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 9fe8b5e310d1..c98acdc0d14f 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -70,6 +70,35 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry)
}
}
+static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp)
+{
+ struct sk_buff *skb;
+ struct page *page;
+
+ if (alx->rx_frag_size > PAGE_SIZE)
+ return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+
+ page = alx->rx_page;
+ if (!page) {
+ alx->rx_page = page = alloc_page(gfp);
+ if (unlikely(!page))
+ return NULL;
+ alx->rx_page_offset = 0;
+ }
+
+ skb = build_skb(page_address(page) + alx->rx_page_offset,
+ alx->rx_frag_size);
+ if (likely(skb)) {
+ alx->rx_page_offset += alx->rx_frag_size;
+ if (alx->rx_page_offset >= PAGE_SIZE)
+ alx->rx_page = NULL;
+ else
+ get_page(page);
+ }
+ return skb;
+}
+
+
static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
{
struct alx_rx_queue *rxq = &alx->rxq;
@@ -86,7 +115,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
while (!cur_buf->skb && next != rxq->read_idx) {
struct alx_rfd *rfd = &rxq->rfd[cur];
- skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+ skb = alx_alloc_skb(alx, gfp);
if (!skb)
break;
dma = dma_map_single(&alx->hw.pdev->dev,
@@ -124,6 +153,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
}
+
return count;
}
@@ -592,6 +622,11 @@ static void alx_free_rings(struct alx_priv *alx)
kfree(alx->txq.bufs);
kfree(alx->rxq.bufs);
+ if (alx->rx_page) {
+ put_page(alx->rx_page);
+ alx->rx_page = NULL;
+ }
+
dma_free_coherent(&alx->hw.pdev->dev,
alx->descmem.size,
alx->descmem.virt,
@@ -646,6 +681,7 @@ static int alx_request_irq(struct alx_priv *alx)
alx->dev->name, alx);
if (!err)
goto out;
+
/* fall back to legacy interrupt */
pci_disable_msi(alx->hw.pdev);
}
@@ -689,6 +725,7 @@ static int alx_init_sw(struct alx_priv *alx)
struct pci_dev *pdev = alx->hw.pdev;
struct alx_hw *hw = &alx->hw;
int err;
+ unsigned int head_size;
err = alx_identify_hw(alx);
if (err) {
@@ -704,7 +741,12 @@ static int alx_init_sw(struct alx_priv *alx)
hw->smb_timer = 400;
hw->mtu = alx->dev->mtu;
+
alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
+ head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ alx->rx_frag_size = roundup_pow_of_two(head_size);
+
alx->tx_ringsz = 256;
alx->rx_ringsz = 512;
hw->imt = 200;
@@ -806,6 +848,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
{
struct alx_priv *alx = netdev_priv(netdev);
int max_frame = ALX_MAX_FRAME_LEN(mtu);
+ unsigned int head_size;
if ((max_frame < ALX_MIN_FRAME_SIZE) ||
(max_frame > ALX_MAX_FRAME_SIZE))
@@ -817,6 +860,9 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
netdev->mtu = mtu;
alx->hw.mtu = mtu;
alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
+ head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ alx->rx_frag_size = roundup_pow_of_two(head_size);
netdev_update_features(netdev);
if (netif_running(netdev))
alx_reinit(alx);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 0a5b770cefaa..c5fe915870ad 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13941,14 +13941,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
bp->doorbells = bnx2x_vf_doorbells(bp);
rc = bnx2x_vf_pci_alloc(bp);
if (rc)
- goto init_one_exit;
+ goto init_one_freemem;
} else {
doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
if (doorbell_size > pci_resource_len(pdev, 2)) {
dev_err(&bp->pdev->dev,
"Cannot map doorbells, bar size too small, aborting\n");
rc = -ENOMEM;
- goto init_one_exit;
+ goto init_one_freemem;
}
bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
doorbell_size);
@@ -13957,19 +13957,19 @@ static int bnx2x_init_one(struct pci_dev *pdev,
dev_err(&bp->pdev->dev,
"Cannot map doorbell space, aborting\n");
rc = -ENOMEM;
- goto init_one_exit;
+ goto init_one_freemem;
}
if (IS_VF(bp)) {
rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
if (rc)
- goto init_one_exit;
+ goto init_one_freemem;
}
/* Enable SRIOV if capability found in configuration space */
rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
if (rc)
- goto init_one_exit;
+ goto init_one_freemem;
/* calc qm_cid_count */
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
@@ -13988,7 +13988,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
rc = bnx2x_set_int_mode(bp);
if (rc) {
dev_err(&pdev->dev, "Cannot set interrupts\n");
- goto init_one_exit;
+ goto init_one_freemem;
}
BNX2X_DEV_INFO("set interrupts successfully\n");
@@ -13996,7 +13996,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
rc = register_netdev(dev);
if (rc) {
dev_err(&pdev->dev, "Cannot register net device\n");
- goto init_one_exit;
+ goto init_one_freemem;
}
BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
@@ -14029,6 +14029,9 @@ static int bnx2x_init_one(struct pci_dev *pdev,
return 0;
+init_one_freemem:
+ bnx2x_free_mem_bp(bp);
+
init_one_exit:
bnx2x_disable_pcie_error_reporting(bp);
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 085f9125cf42..06f031715b57 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -205,8 +205,10 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
* re-adding ourselves to the poll list.
*/
- if (priv->tx_skb && !tx_ctrl_ct)
+ if (priv->tx_skb && !tx_ctrl_ct) {
+ nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
napi_reschedule(napi);
+ }
}
return work_done;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ca2cccc594fd..3c0255e98535 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1197,10 +1197,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
fec16_to_cpu(bdp->cbd_datlen),
DMA_TO_DEVICE);
bdp->cbd_bufaddr = cpu_to_fec32(0);
- if (!skb) {
- bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
- continue;
- }
+ if (!skb)
+ goto skb_done;
/* Check for errors. */
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1239,7 +1237,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
-
+skb_done:
/* Make sure the update to bdp and tx_skbuff are performed
* before dirty_tx
*/
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 3d746c887873..67a648c7d3a9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -46,7 +46,6 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
u32 link_stat = priv->link;
struct hnae_handle *h;
- assert(priv && priv->ae_handle);
h = priv->ae_handle;
if (priv->phy) {
@@ -646,8 +645,6 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
- assert(priv);
-
strncpy(drvinfo->version, HNAE_DRIVER_VERSION,
sizeof(drvinfo->version));
drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
@@ -720,8 +717,6 @@ static int hns_set_pauseparam(struct net_device *net_dev,
struct hnae_handle *h;
struct hnae_ae_ops *ops;
- assert(priv || priv->ae_handle);
-
h = priv->ae_handle;
ops = h->dev->ops;
@@ -780,8 +775,6 @@ static int hns_set_coalesce(struct net_device *net_dev,
struct hnae_ae_ops *ops;
int ret;
- assert(priv || priv->ae_handle);
-
ops = priv->ae_handle->dev->ops;
if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
@@ -1111,8 +1104,6 @@ void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
- assert(priv || priv->ae_handle);
-
ops = priv->ae_handle->dev->ops;
cmd->version = HNS_CHIP_VERSION;
@@ -1135,8 +1126,6 @@ static int hns_get_regs_len(struct net_device *net_dev)
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
- assert(priv || priv->ae_handle);
-
ops = priv->ae_handle->dev->ops;
if (!ops->get_regs_len) {
netdev_err(net_dev, "ops->get_regs_len is null!\n");
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 01fccec632ec..466939f8f0cf 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -189,6 +189,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
hwbm_pool->construct = mvneta_bm_construct;
hwbm_pool->priv = new_pool;
+ spin_lock_init(&hwbm_pool->lock);
/* Create new pool */
err = mvneta_bm_pool_create(priv, new_pool);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index c761194bb323..fc95affaf76b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -362,7 +362,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- data[index++] = ((unsigned long *)&priv->stats)[i];
+ data[index++] = ((unsigned long *)&dev->stats)[i];
for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 92e0624f4cf0..19ceced6736c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1296,15 +1296,16 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
}
-static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
spin_lock_bh(&priv->stats_lock);
- memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
+ netdev_stats_to_stats64(stats, &dev->stats);
spin_unlock_bh(&priv->stats_lock);
- return &priv->ret_stats;
+ return stats;
}
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
@@ -1876,7 +1877,6 @@ static void mlx4_en_clear_stats(struct net_device *dev)
if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
en_dbg(HW, priv, "Failed dumping statistics\n");
- memset(&priv->stats, 0, sizeof(priv->stats));
memset(&priv->pstats, 0, sizeof(priv->pstats));
memset(&priv->pkstats, 0, sizeof(priv->pkstats));
memset(&priv->port_stats, 0, sizeof(priv->port_stats));
@@ -1892,6 +1892,11 @@ static void mlx4_en_clear_stats(struct net_device *dev)
priv->tx_ring[i]->bytes = 0;
priv->tx_ring[i]->packets = 0;
priv->tx_ring[i]->tx_csum = 0;
+ priv->tx_ring[i]->tx_dropped = 0;
+ priv->tx_ring[i]->queue_stopped = 0;
+ priv->tx_ring[i]->wake_queue = 0;
+ priv->tx_ring[i]->tso_packets = 0;
+ priv->tx_ring[i]->xmit_more = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_ring[i]->bytes = 0;
@@ -2482,7 +2487,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
- .ndo_get_stats = mlx4_en_get_stats,
+ .ndo_get_stats64 = mlx4_en_get_stats64,
.ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
@@ -2514,7 +2519,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
- .ndo_get_stats = mlx4_en_get_stats,
+ .ndo_get_stats64 = mlx4_en_get_stats64,
.ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 20b6c2e678b8..5aa8b751f417 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -152,8 +152,9 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
struct mlx4_counter tmp_counter_stats;
struct mlx4_en_stat_out_mbox *mlx4_en_stats;
struct mlx4_en_stat_out_flow_control_mbox *flowstats;
- struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
- struct net_device_stats *stats = &priv->stats;
+ struct net_device *dev = mdev->pndev[port];
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
struct mlx4_cmd_mailbox *mailbox;
u64 in_mod = reset << 8 | port;
int err;
@@ -188,6 +189,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
}
stats->tx_packets = 0;
stats->tx_bytes = 0;
+ stats->tx_dropped = 0;
priv->port_stats.tx_chksum_offload = 0;
priv->port_stats.queue_stopped = 0;
priv->port_stats.wake_queue = 0;
@@ -199,6 +201,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->tx_packets += ring->packets;
stats->tx_bytes += ring->bytes;
+ stats->tx_dropped += ring->tx_dropped;
priv->port_stats.tx_chksum_offload += ring->tx_csum;
priv->port_stats.queue_stopped += ring->queue_stopped;
priv->port_stats.wake_queue += ring->wake_queue;
@@ -237,21 +240,12 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
&mlx4_en_stats->MCAST_prio_1,
NUM_PRIORITIES);
- stats->collisions = 0;
stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
sw_rx_dropped;
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
- stats->rx_over_errors = 0;
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
- stats->rx_frame_errors = 0;
stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
- stats->rx_missed_errors = 0;
- stats->tx_aborted_errors = 0;
- stats->tx_carrier_errors = 0;
- stats->tx_fifo_errors = 0;
- stats->tx_heartbeat_errors = 0;
- stats->tx_window_errors = 0;
- stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP);
+ stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP);
/* RX stats */
priv->pkstats.rx_multicast_packets = stats->multicast;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index f6e61570cb2c..76aa4d27183c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -726,12 +726,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bool inline_ok;
u32 ring_cons;
- if (!priv->port_up)
- goto tx_drop;
-
tx_ind = skb_get_queue_mapping(skb);
ring = priv->tx_ring[tx_ind];
+ if (!priv->port_up)
+ goto tx_drop;
+
/* fetch ring->cons far ahead before needing it to avoid stall */
ring_cons = ACCESS_ONCE(ring->cons);
@@ -1030,7 +1030,7 @@ tx_drop_unmap:
tx_drop:
dev_kfree_skb_any(skb);
- priv->stats.tx_dropped++;
+ ring->tx_dropped++;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index cc84e09f324a..467d47ed2c39 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -270,6 +270,7 @@ struct mlx4_en_tx_ring {
unsigned long tx_csum;
unsigned long tso_packets;
unsigned long xmit_more;
+ unsigned int tx_dropped;
struct mlx4_bf bf;
unsigned long queue_stopped;
@@ -482,8 +483,6 @@ struct mlx4_en_priv {
struct mlx4_en_port_profile *prof;
struct net_device *dev;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- struct net_device_stats stats;
- struct net_device_stats ret_stats;
struct mlx4_en_port_state port_state;
spinlock_t stats_lock;
struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index cbf58e1f9333..21ec1c2df2c7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -192,9 +192,10 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
struct dcbx_app_priority_entry *p_tbl,
u32 pri_tc_tbl, int count, bool dcbx_enabled)
{
- u8 tc, priority, priority_map;
+ u8 tc, priority_map;
enum dcbx_protocol_type type;
u16 protocol_id;
+ int priority;
bool enable;
int i;
@@ -221,7 +222,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
* indication, but we only got here if there was an
* app tlv for the protocol, so dcbx must be enabled.
*/
- enable = !!(type == DCBX_PROTOCOL_ETH);
+ enable = !(type == DCBX_PROTOCOL_ETH);
qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
priority, tc, type);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 089016f46f26..2d89e8c16b32 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -155,12 +155,14 @@ void qed_resc_free(struct qed_dev *cdev)
}
}
-static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
+static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
{
u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct init_qm_port_params *p_qm_port;
u16 num_pqs, multi_cos_tcs = 1;
+ u8 pf_wfq = qm_info->pf_wfq;
+ u32 pf_rl = qm_info->pf_rl;
u16 num_vfs = 0;
#ifdef CONFIG_QED_SRIOV
@@ -182,23 +184,28 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
/* PQs will be arranged as follows: First per-TC PQ then pure-LB quete.
*/
- qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
- num_pqs, GFP_KERNEL);
+ qm_info->qm_pq_params = kcalloc(num_pqs,
+ sizeof(struct init_qm_pq_params),
+ b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
if (!qm_info->qm_pq_params)
goto alloc_err;
- qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
- num_vports, GFP_KERNEL);
+ qm_info->qm_vport_params = kcalloc(num_vports,
+ sizeof(struct init_qm_vport_params),
+ b_sleepable ? GFP_KERNEL
+ : GFP_ATOMIC);
if (!qm_info->qm_vport_params)
goto alloc_err;
- qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
- MAX_NUM_PORTS, GFP_KERNEL);
+ qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
+ sizeof(struct init_qm_port_params),
+ b_sleepable ? GFP_KERNEL
+ : GFP_ATOMIC);
if (!qm_info->qm_port_params)
goto alloc_err;
- qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
- GFP_KERNEL);
+ qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
+ b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
if (!qm_info->wfq_data)
goto alloc_err;
@@ -264,10 +271,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
for (i = 0; i < qm_info->num_vports; i++)
qm_info->qm_vport_params[i].vport_wfq = 1;
- qm_info->pf_wfq = 0;
- qm_info->pf_rl = 0;
qm_info->vport_rl_en = 1;
qm_info->vport_wfq_en = 1;
+ qm_info->pf_rl = pf_rl;
+ qm_info->pf_wfq = pf_wfq;
return 0;
@@ -299,7 +306,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_qm_info_free(p_hwfn);
/* initialize qed's qm data structure */
- rc = qed_init_qm_info(p_hwfn);
+ rc = qed_init_qm_info(p_hwfn, false);
if (rc)
return rc;
@@ -388,7 +395,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_err;
/* Prepare and process QM requirements */
- rc = qed_init_qm_info(p_hwfn);
+ rc = qed_init_qm_info(p_hwfn, true);
if (rc)
goto alloc_err;
@@ -581,7 +588,14 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
hw_mode |= 1 << MODE_ASIC;
+ if (p_hwfn->cdev->num_hwfns > 1)
+ hw_mode |= 1 << MODE_100G;
+
p_hwfn->hw_info.hw_mode = hw_mode;
+
+ DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
+ "Configuring function for hw_mode: 0x%08x\n",
+ p_hwfn->hw_info.hw_mode);
}
/* Init run time data for all PFs on an engine. */
@@ -821,6 +835,11 @@ int qed_hw_init(struct qed_dev *cdev,
u32 load_code, param;
int rc, mfw_rc, i;
+ if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+ DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+ return -EINVAL;
+ }
+
if (IS_PF(cdev)) {
rc = qed_init_fw_data(cdev, bin_fw_data);
if (rc != 0)
@@ -2086,6 +2105,13 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
{
int i;
+ if (cdev->num_hwfns > 1) {
+ DP_VERBOSE(cdev,
+ NETIF_MSG_LINK,
+ "WFQ configuration is not supported for this device\n");
+ return;
+ }
+
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 8b22f87033ce..753064679bde 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -413,15 +413,17 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
/* Fallthrough */
case QED_INT_MODE_MSI:
- rc = pci_enable_msi(cdev->pdev);
- if (!rc) {
- int_params->out.int_mode = QED_INT_MODE_MSI;
- goto out;
- }
+ if (cdev->num_hwfns == 1) {
+ rc = pci_enable_msi(cdev->pdev);
+ if (!rc) {
+ int_params->out.int_mode = QED_INT_MODE_MSI;
+ goto out;
+ }
- DP_NOTICE(cdev, "Failed to enable MSI\n");
- if (force_mode)
- goto out;
+ DP_NOTICE(cdev, "Failed to enable MSI\n");
+ if (force_mode)
+ goto out;
+ }
/* Fallthrough */
case QED_INT_MODE_INTA:
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 1bc75358cbc4..ad3cae3b7243 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -230,7 +230,10 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
case ETH_SS_PRIV_FLAGS:
return QEDE_PRI_FLAG_LEN;
case ETH_SS_TEST:
- return QEDE_ETHTOOL_TEST_MAX;
+ if (!IS_VF(edev))
+ return QEDE_ETHTOOL_TEST_MAX;
+ else
+ return 0;
default:
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Unsupported stringset 0x%08x\n", stringset);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 337e839ca586..5d00d1404bfc 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1824,7 +1824,7 @@ static int qede_set_vf_rate(struct net_device *dev, int vfidx,
{
struct qede_dev *edev = netdev_priv(dev);
- return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate,
+ return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
max_tx_rate);
}
@@ -2091,6 +2091,29 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
edev->accept_any_vlan = false;
}
+int qede_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ netdev_features_t changes = features ^ dev->features;
+ bool need_reload = false;
+
+ /* No action needed if hardware GRO is disabled during driver load */
+ if (changes & NETIF_F_GRO) {
+ if (dev->features & NETIF_F_GRO)
+ need_reload = !edev->gro_disable;
+ else
+ need_reload = edev->gro_disable;
+ }
+
+ if (need_reload && netif_running(edev->ndev)) {
+ dev->features = features;
+ qede_reload(edev, NULL, NULL);
+ return 1;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_QEDE_VXLAN
static void qede_add_vxlan_port(struct net_device *dev,
sa_family_t sa_family, __be16 port)
@@ -2175,6 +2198,7 @@ static const struct net_device_ops qede_netdev_ops = {
#endif
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_link_state = qede_set_vf_link_state,
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 83d72106471c..fd5d1c93b55b 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4846,7 +4846,6 @@ static void ql_eeh_close(struct net_device *ndev)
}
/* Disabling the timer */
- del_timer_sync(&qdev->timer);
ql_cancel_all_work_sync(qdev);
for (i = 0; i < qdev->rss_ring_count; i++)
@@ -4873,6 +4872,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
netif_device_detach(ndev);
+ del_timer_sync(&qdev->timer);
if (netif_running(ndev))
ql_eeh_close(ndev);
pci_disable_device(pdev);
@@ -4880,6 +4880,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
case pci_channel_io_perm_failure:
dev_err(&pdev->dev,
"%s: pci_channel_io_perm_failure.\n", __func__);
+ del_timer_sync(&qdev->timer);
ql_eeh_close(ndev);
set_bit(QL_EEH_FATAL, &qdev->flags);
return PCI_ERS_RESULT_DISCONNECT;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 1681084cc96f..1f309127457d 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -619,6 +619,17 @@ fail:
return rc;
}
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+
+ /* All our existing PIO buffers went away */
+ efx_for_each_channel(channel, efx)
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ tx_queue->piobuf = NULL;
+}
+
#else /* !EFX_USE_PIO */
static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
@@ -635,6 +646,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+}
+
#endif /* EFX_USE_PIO */
static void efx_ef10_remove(struct efx_nic *efx)
@@ -1018,6 +1033,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
nic_data->must_realloc_vis = true;
nic_data->must_restore_filters = true;
nic_data->must_restore_piobufs = true;
+ efx_ef10_forget_old_piobufs(efx);
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
/* Driver-created vswitches and vports must be re-created */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 0705ec869487..097f363f1630 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1726,14 +1726,33 @@ static int efx_probe_filters(struct efx_nic *efx)
#ifdef CONFIG_RFS_ACCEL
if (efx->type->offload_features & NETIF_F_NTUPLE) {
- efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
- sizeof(*efx->rps_flow_id),
- GFP_KERNEL);
- if (!efx->rps_flow_id) {
+ struct efx_channel *channel;
+ int i, success = 1;
+
+ efx_for_each_channel(channel, efx) {
+ channel->rps_flow_id =
+ kcalloc(efx->type->max_rx_ip_filters,
+ sizeof(*channel->rps_flow_id),
+ GFP_KERNEL);
+ if (!channel->rps_flow_id)
+ success = 0;
+ else
+ for (i = 0;
+ i < efx->type->max_rx_ip_filters;
+ ++i)
+ channel->rps_flow_id[i] =
+ RPS_FLOW_ID_INVALID;
+ }
+
+ if (!success) {
+ efx_for_each_channel(channel, efx)
+ kfree(channel->rps_flow_id);
efx->type->filter_table_remove(efx);
rc = -ENOMEM;
goto out_unlock;
}
+
+ efx->rps_expire_index = efx->rps_expire_channel = 0;
}
#endif
out_unlock:
@@ -1744,7 +1763,10 @@ out_unlock:
static void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
- kfree(efx->rps_flow_id);
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ kfree(channel->rps_flow_id);
#endif
down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 38c422321cda..d13ddf9703ff 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -403,6 +403,8 @@ enum efx_sync_events_state {
* @event_test_cpu: Last CPU to handle interrupt or test event for this channel
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ * indexed by filter ID
* @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
* @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
* @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
@@ -446,6 +448,8 @@ struct efx_channel {
unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL
unsigned int rfs_filters_added;
+#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
+ u32 *rps_flow_id;
#endif
unsigned n_rx_tobe_disc;
@@ -889,9 +893,9 @@ struct vfdi_status;
* @filter_sem: Filter table rw_semaphore, for freeing the table
* @filter_lock: Filter table lock, for mere content changes
* @filter_state: Architecture-dependent filter table state
- * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
- * indexed by filter ID
- * @rps_expire_index: Next index to check for expiry in @rps_flow_id
+ * @rps_expire_channel: Next channel to check for expiry
+ * @rps_expire_index: Next index to check for expiry in
+ * @rps_expire_channel's @rps_flow_id
* @active_queues: Count of RX and TX queues that haven't been flushed and drained.
* @rxq_flush_pending: Count of number of receive queues that need to be flushed.
* Decremented when the efx_flush_rx_queue() is called.
@@ -1035,7 +1039,7 @@ struct efx_nic {
spinlock_t filter_lock;
void *filter_state;
#ifdef CONFIG_RFS_ACCEL
- u32 *rps_flow_id;
+ unsigned int rps_expire_channel;
unsigned int rps_expire_index;
#endif
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 8956995b2fe7..02b0b5272c14 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -842,33 +842,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
struct efx_filter_spec spec;
- const __be16 *ports;
- __be16 ether_type;
- int nhoff;
+ struct flow_keys fk;
int rc;
- /* The core RPS/RFS code has already parsed and validated
- * VLAN, IP and transport headers. We assume they are in the
- * header area.
- */
-
- if (skb->protocol == htons(ETH_P_8021Q)) {
- const struct vlan_hdr *vh =
- (const struct vlan_hdr *)skb->data;
+ if (flow_id == RPS_FLOW_ID_INVALID)
+ return -EINVAL;
- /* We can't filter on the IP 5-tuple and the vlan
- * together, so just strip the vlan header and filter
- * on the IP part.
- */
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
- ether_type = vh->h_vlan_encapsulated_proto;
- nhoff = sizeof(struct vlan_hdr);
- } else {
- ether_type = skb->protocol;
- nhoff = 0;
- }
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return -EPROTONOSUPPORT;
- if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
+ if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+ if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
return -EPROTONOSUPPORT;
efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
@@ -878,56 +863,41 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
- spec.ether_type = ether_type;
-
- if (ether_type == htons(ETH_P_IP)) {
- const struct iphdr *ip =
- (const struct iphdr *)(skb->data + nhoff);
-
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
- if (ip_is_fragment(ip))
- return -EPROTONOSUPPORT;
- spec.ip_proto = ip->protocol;
- spec.rem_host[0] = ip->saddr;
- spec.loc_host[0] = ip->daddr;
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
- ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+ spec.ether_type = fk.basic.n_proto;
+ spec.ip_proto = fk.basic.ip_proto;
+
+ if (fk.basic.n_proto == htons(ETH_P_IP)) {
+ spec.rem_host[0] = fk.addrs.v4addrs.src;
+ spec.loc_host[0] = fk.addrs.v4addrs.dst;
} else {
- const struct ipv6hdr *ip6 =
- (const struct ipv6hdr *)(skb->data + nhoff);
-
- EFX_BUG_ON_PARANOID(skb_headlen(skb) <
- nhoff + sizeof(*ip6) + 4);
- spec.ip_proto = ip6->nexthdr;
- memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
- memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
- ports = (const __be16 *)(ip6 + 1);
+ memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
+ memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
}
- spec.rem_port = ports[0];
- spec.loc_port = ports[1];
+ spec.rem_port = fk.ports.src;
+ spec.loc_port = fk.ports.dst;
rc = efx->type->filter_rfs_insert(efx, &spec);
if (rc < 0)
return rc;
/* Remember this so we can check whether to expire the filter later */
- efx->rps_flow_id[rc] = flow_id;
- channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+ channel = efx_get_channel(efx, rxq_index);
+ channel->rps_flow_id[rc] = flow_id;
++channel->rfs_filters_added;
- if (ether_type == htons(ETH_P_IP))
+ if (spec.ether_type == htons(ETH_P_IP))
netif_info(efx, rx_status, efx->net_dev,
"steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
(spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
- spec.rem_host, ntohs(ports[0]), spec.loc_host,
- ntohs(ports[1]), rxq_index, flow_id, rc);
+ spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+ ntohs(spec.loc_port), rxq_index, flow_id, rc);
else
netif_info(efx, rx_status, efx->net_dev,
"steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
(spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
- spec.rem_host, ntohs(ports[0]), spec.loc_host,
- ntohs(ports[1]), rxq_index, flow_id, rc);
+ spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+ ntohs(spec.loc_port), rxq_index, flow_id, rc);
return rc;
}
@@ -935,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
- unsigned int index, size;
+ unsigned int channel_idx, index, size;
u32 flow_id;
if (!spin_trylock_bh(&efx->filter_lock))
return false;
expire_one = efx->type->filter_rfs_expire_one;
+ channel_idx = efx->rps_expire_channel;
index = efx->rps_expire_index;
size = efx->type->max_rx_ip_filters;
while (quota--) {
- flow_id = efx->rps_flow_id[index];
- if (expire_one(efx, flow_id, index))
+ struct efx_channel *channel = efx_get_channel(efx, channel_idx);
+ flow_id = channel->rps_flow_id[index];
+
+ if (flow_id != RPS_FLOW_ID_INVALID &&
+ expire_one(efx, flow_id, index)) {
netif_info(efx, rx_status, efx->net_dev,
- "expired filter %d [flow %u]\n",
- index, flow_id);
- if (++index == size)
+ "expired filter %d [queue %u flow %u]\n",
+ index, channel_idx, flow_id);
+ channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+ }
+ if (++index == size) {
+ if (++channel_idx == efx->n_channels)
+ channel_idx = 0;
index = 0;
+ }
}
+ efx->rps_expire_channel = channel_idx;
efx->rps_expire_index = index;
spin_unlock_bh(&efx->filter_lock);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 3f83c369f56c..ec295851812b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -297,7 +297,7 @@ int stmmac_mdio_register(struct net_device *ndev)
return -ENOMEM;
if (mdio_bus_data->irqs)
- memcpy(new_bus->irq, mdio_bus_data, sizeof(new_bus->irq));
+ memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq));
#ifdef CONFIG_OF
if (priv->device->of_node)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a0f64cba86ba..2ace126533cd 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -990,7 +990,7 @@ static void team_port_disable(struct team *team,
#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
-static void __team_compute_features(struct team *team)
+static void ___team_compute_features(struct team *team)
{
struct team_port *port;
u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
@@ -1021,15 +1021,20 @@ static void __team_compute_features(struct team *team)
team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
+}
+static void __team_compute_features(struct team *team)
+{
+ ___team_compute_features(team);
netdev_change_features(team->dev);
}
static void team_compute_features(struct team *team)
{
mutex_lock(&team->lock);
- __team_compute_features(team);
+ ___team_compute_features(team);
mutex_unlock(&team->lock);
+ netdev_change_features(team->dev);
}
static int team_port_enter(struct team *team, struct team_port *port)
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 36cd7f016a8d..9bbe0161a2f4 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -473,7 +473,7 @@ static void read_bulk_callback(struct urb *urb)
goto goon;
}
- if (!count || count < 4)
+ if (count < 4)
goto goon;
rx_status = buf[count - 2];
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index d9d2806a47b1..dc989a8b5afb 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -61,6 +61,8 @@
#define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
+#define CARRIER_CHECK_DELAY (2 * HZ)
+
struct smsc95xx_priv {
u32 mac_cr;
u32 hash_hi;
@@ -69,6 +71,9 @@ struct smsc95xx_priv {
spinlock_t mac_cr_lock;
u8 features;
u8 suspend_flags;
+ bool link_ok;
+ struct delayed_work carrier_check;
+ struct usbnet *dev;
};
static bool turbo_mode = true;
@@ -624,6 +629,44 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
intdata);
}
+static void set_carrier(struct usbnet *dev, bool link)
+{
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
+ if (pdata->link_ok == link)
+ return;
+
+ pdata->link_ok = link;
+
+ if (link)
+ usbnet_link_change(dev, 1, 0);
+ else
+ usbnet_link_change(dev, 0, 0);
+}
+
+static void check_carrier(struct work_struct *work)
+{
+ struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv,
+ carrier_check.work);
+ struct usbnet *dev = pdata->dev;
+ int ret;
+
+ if (pdata->suspend_flags != 0)
+ return;
+
+ ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read MII_BMSR\n");
+ return;
+ }
+ if (ret & BMSR_LSTATUS)
+ set_carrier(dev, 1);
+ else
+ set_carrier(dev, 0);
+
+ schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+}
+
/* Enable or disable Tx & Rx checksum offload engines */
static int smsc95xx_set_features(struct net_device *netdev,
netdev_features_t features)
@@ -1165,13 +1208,20 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->flags |= IFF_MULTICAST;
dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+ pdata->dev = dev;
+ INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier);
+ schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+
return 0;
}
static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
if (pdata) {
+ cancel_delayed_work(&pdata->carrier_check);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
pdata = NULL;
@@ -1695,6 +1745,7 @@ static int smsc95xx_resume(struct usb_interface *intf)
/* do this first to ensure it's cleared even in error case */
pdata->suspend_flags = 0;
+ schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
if (suspend_flags & SUSPEND_ALLMODES) {
/* clear wake-up sources */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 49d84e540343..e0638e556fe7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1925,24 +1925,11 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
- /* Last of all, set up some receive buffers. */
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
-
- /* If we didn't even get one input buffer, we're useless. */
- if (vi->rq[i].vq->num_free ==
- virtqueue_get_vring_size(vi->rq[i].vq)) {
- free_unused_bufs(vi);
- err = -ENOMEM;
- goto free_recv_bufs;
- }
- }
-
vi->nb.notifier_call = &virtnet_cpu_callback;
err = register_hotcpu_notifier(&vi->nb);
if (err) {
pr_debug("virtio_net: registering cpu notifier failed\n");
- goto free_recv_bufs;
+ goto free_unregister_netdev;
}
/* Assume link up if device can't report link status,
@@ -1960,10 +1947,9 @@ static int virtnet_probe(struct virtio_device *vdev)
return 0;
-free_recv_bufs:
+free_unregister_netdev:
vi->vdev->config->reset(vdev);
- free_receive_bufs(vi);
unregister_netdev(dev);
free_vqs:
cancel_delayed_work_sync(&vi->refill);
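
The virtio_net change above stops pre-filling the receive rings in probe and renames the error label to reflect what actually has to be unwound (the already-registered netdev). The underlying idiom is the usual layered goto unwinding in reverse setup order; a generic userspace sketch of that shape (step/undo names are made up, not virtio code):

#include <errno.h>
#include <stdio.h>

/* Stand-ins for setup/teardown steps; in a driver these would be
 * register_netdev(), request_irq(), and so on. */
static int step_a(void) { return 0; }
static void undo_a(void) { puts("undo a"); }
static int step_b(void) { return 0; }
static void undo_b(void) { puts("undo b"); }
static int step_c(void) { return -ENOMEM; } /* pretend the last step fails */

static int probe(void)
{
    int err;

    err = step_a();
    if (err)
        goto out;
    err = step_b();
    if (err)
        goto err_undo_a;
    err = step_c();
    if (err)
        goto err_undo_b; /* the label names what must be torn down */
    return 0;

err_undo_b:
    undo_b();
err_undo_a:
    undo_a();
out:
    return err;
}

int main(void)
{
    printf("probe() = %d\n", probe());
    return 0;
}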
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 8ff30c3bdfce..f999db2f97b4 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -3086,6 +3086,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
+ if (tb[IFLA_MTU])
+ conf.mtu = nla_get_u32(tb[IFLA_MTU]);
+
err = vxlan_dev_configure(src_net, dev, &conf);
switch (err) {
case -ENODEV:
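
The vxlan change above copies an optional IFLA_MTU attribute into the configuration before vxlan_dev_configure() runs, so an MTU supplied at link creation is honored. Netlink attributes are optional, so the table slot is tested before nla_get_u32() reads it; a kernel-style fragment of that guard (my_dev_cfg and my_cfg_from_attrs are illustrative names, and the snippet assumes the usual rtnl newlink context):

#include <net/netlink.h>
#include <linux/if_link.h>

struct my_dev_cfg {
    u32 mtu; /* 0 means "not specified" */
};

/* tb[] is the generic link attribute table passed to ->newlink(); an
 * absent attribute is simply a NULL slot, so check before nla_get_u32(). */
static void my_cfg_from_attrs(struct nlattr *tb[], struct my_dev_cfg *cfg)
{
    if (tb[IFLA_MTU])
        cfg->mtu = nla_get_u32(tb[IFLA_MTU]);
}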
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 207b13b618cf..a607655d7830 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1256,9 +1256,10 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
const struct mtk_desc_pin *pin;
chained_irq_enter(chip, desc);
- for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) {
+ for (eint_num = 0;
+ eint_num < pctl->devdata->ap_num;
+ eint_num += 32, reg += 4) {
status = readl(reg);
- reg += 4;
while (status) {
offset = __ffs(status);
index = eint_num + offset;
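
The mediatek change above ties the status-register pointer advance (reg += 4) to the loop increment instead of burying it in the body; each register covers 32 EINT lines whose pending bits are walked with __ffs(). A small userspace sketch of that bit-walking idiom (register values are made up; ffs() - 1 stands in for the kernel's __ffs()):

#include <stdint.h>
#include <stdio.h>
#include <strings.h> /* ffs() */

int main(void)
{
    /* Pretend these are two 32-bit interrupt status registers. */
    uint32_t regs[2] = { 0x00000005, 0x80000000 };
    unsigned int base;

    for (base = 0; base < 64; base += 32) {
        uint32_t status = regs[base / 32];

        /* Handle each pending bit, lowest first, clearing as we go. */
        while (status) {
            unsigned int offset = ffs(status) - 1;

            printf("pending line %u\n", base + offset);
            status &= ~(1u << offset);
        }
    }
    return 0;
}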
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index ccbfc325c778..38faceff2f08 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -854,7 +854,7 @@ static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset)
clk_enable(nmk_chip->clk);
- dir = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
+ dir = !(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
clk_disable(nmk_chip->clk);
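
Judging by the fix above, a set bit in the Nomadik DIR register means the line is an output, while gpiolib's get_direction callback is expected to return 1 for input and 0 for output, hence the switch from !! to !. A tiny sketch of that mapping (hypothetical register value; the 1-means-output reading of the register is an assumption based on this fix, not something stated here):

#include <stdint.h>
#include <stdio.h>

#define DIR_IN  1 /* gpiolib convention: 1 = input */
#define DIR_OUT 0

/* dir_reg has one bit per line; a set bit means "output" in hardware. */
static int get_direction(uint32_t dir_reg, unsigned int offset)
{
    return !(dir_reg & (1u << offset)); /* invert: output bit -> DIR_OUT */
}

int main(void)
{
    uint32_t dir_reg = 0x00000002; /* line 1 configured as output */

    printf("line 0: %s\n", get_direction(dir_reg, 0) == DIR_IN ? "input" : "output");
    printf("line 1: %s\n", get_direction(dir_reg, 1) == DIR_IN ? "input" : "output");
    return 0;
}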
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 579fd65299a0..d637c933c8a9 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -208,14 +208,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
break;
case PTP_SYS_OFFSET:
- sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL);
- if (!sysoff) {
- err = -ENOMEM;
- break;
- }
- if (copy_from_user(sysoff, (void __user *)arg,
- sizeof(*sysoff))) {
- err = -EFAULT;
+ sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
+ if (IS_ERR(sysoff)) {
+ err = PTR_ERR(sysoff);
+ sysoff = NULL;
break;
}
if (sysoff->n_samples > PTP_MAX_SAMPLES) {
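
The ptp change above replaces the open-coded kmalloc() plus copy_from_user() with memdup_user(), which does both in one call and reports failure via an ERR_PTR; resetting sysoff to NULL keeps the shared kfree() at the end of the ioctl safe. A kernel-style fragment of the pattern (my_req and fetch_req are illustrative, and the snippet assumes a kernel context):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct my_req {
    unsigned int n_samples;
};

/* Copy a fixed-size request from user space. Returns 0 or a -errno. */
static int fetch_req(void __user *uarg, struct my_req **out)
{
    struct my_req *req;

    req = memdup_user(uarg, sizeof(*req)); /* kmalloc + copy_from_user */
    if (IS_ERR(req))
        return PTR_ERR(req); /* typically -ENOMEM or -EFAULT */

    *out = req; /* caller kfree()s it later */
    return 0;
}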
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 8f90d9e77104..969c312de1be 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -621,6 +621,11 @@ struct aac_driver_ident
#define AAC_QUIRK_SCSI_32 0x0020
/*
+ * SRC based adapters support the AifReqEvent functions
+ */
+#define AAC_QUIRK_SRC 0x0040
+
+/*
* The adapter interface specs all queues to be located in the same
* physically contiguous block. The host structure that defines the
communication queues will assume they are each a separate physically
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index a943bd230bc2..79871f3519ff 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -236,10 +236,10 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
- { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 7 (Denali) */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 8 */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Series 9 */
+ { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */
};
/**
@@ -1299,7 +1299,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
else
shost->this_id = shost->max_id;
- aac_intr_normal(aac, 0, 2, 0, NULL);
+ if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
+ aac_intr_normal(aac, 0, 2, 0, NULL);
/*
* dmb - we may need to move the setting of these parms somewhere else once
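
The aacraid change above adds an AAC_QUIRK_SRC bit to the SRC-based entries of the ident table so the aac_intr_normal() kick in probe only runs on adapters that support it. Keying optional behavior off a per-device quirk mask in a static table is a common pattern; a compact userspace sketch with made-up names:

#include <stdio.h>

#define QUIRK_SRC 0x40 /* mirrors the idea of AAC_QUIRK_SRC */

struct ident {
    const char *name;
    unsigned int quirks;
};

static const struct ident idents[] = {
    { "legacy-adapter", 0 },
    { "src-adapter",    QUIRK_SRC },
};

static void probe_one(const struct ident *id)
{
    printf("probing %s\n", id->name);

    /* Only poke the optional interface when the table says it exists. */
    if (id->quirks & QUIRK_SRC)
        printf("  issuing SRC-only setup\n");
}

int main(void)
{
    for (unsigned int i = 0; i < sizeof(idents) / sizeof(idents[0]); i++)
        probe_one(&idents[i]);
    return 0;
}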
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 6a4df5a315e9..6bff13e7afc7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -7975,13 +7975,14 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
ActiveCableEventData =
(Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
if (ActiveCableEventData->ReasonCode ==
- MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER)
+ MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) {
pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d",
ioc->name, ActiveCableEventData->ReceptacleID);
pr_info("cannot be powered and devices connected to this active cable");
pr_info("will not be seen. This active cable");
pr_info("requires %d mW of power",
ActiveCableEventData->ActiveCablePowerRequirement);
+ }
break;
default: /* ignore the rest */
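
The mpt3sas fix above adds braces so that every pr_info() line is guarded by the ReasonCode test; without them only the first statement was conditional and the remaining messages printed for every event. A tiny userspace illustration of that pitfall:

#include <stdio.h>

int main(void)
{
    int reason = 0; /* condition is false */

    /* Buggy form: the indentation suggests both lines are guarded,
     * but only the first printf() actually is. */
    if (reason == 1)
        printf("first line\n");
        printf("second line (always prints!)\n");

    /* Fixed form: braces make the whole block conditional. */
    if (reason == 1) {
        printf("first line\n");
        printf("second line\n");
    }
    return 0;
}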
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b2e332af0f51..c71344aebdbb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -821,9 +821,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
}
/*
- * If we finished all bytes in the request we are done now.
+ * special case: failed zero length commands always need to
+ * drop down into the retry code. Otherwise, if we finished
+ * all bytes in the request we are done now.
*/
- if (!scsi_end_request(req, error, good_bytes, 0))
+ if (!(blk_rq_bytes(req) == 0 && error) &&
+ !scsi_end_request(req, error, good_bytes, 0))
return;
/*
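
The scsi_lib change above keeps failed zero-length commands out of the immediate completion path so they reach the retry and error handling further down. A small stand-in for the shape of the new predicate (toy function, not the actual SCSI completion logic):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the shape of the new test: only complete the request here
 * when it is not a failed zero-length command. */
static bool complete_here(unsigned int req_bytes, int error, bool fully_ended)
{
    if (req_bytes == 0 && error)
        return false; /* fall through to the retry code instead */
    return fully_ended;
}

int main(void)
{
    printf("zero-length + error : %d\n", complete_here(0, -5, true));
    printf("zero-length, success: %d\n", complete_here(0, 0, true));
    printf("normal, error       : %d\n", complete_here(4096, -5, true));
    return 0;
}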
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 428c03ef02b2..f459dff30512 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1398,11 +1398,15 @@ static int media_not_present(struct scsi_disk *sdkp,
**/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
- struct scsi_disk *sdkp = scsi_disk(disk);
- struct scsi_device *sdp = sdkp->device;
+ struct scsi_disk *sdkp = scsi_disk_get(disk);
+ struct scsi_device *sdp;
struct scsi_sense_hdr *sshdr = NULL;
int retval;
+ if (!sdkp)
+ return 0;
+
+ sdp = sdkp->device;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
/*
@@ -1459,6 +1463,7 @@ out:
kfree(sshdr);
retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
sdp->changed = 0;
+ scsi_disk_put(sdkp);
return retval;
}
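
The sd change above pins the scsi_disk with scsi_disk_get() for the duration of sd_check_events() and drops it with scsi_disk_put() on the way out, bailing out early if the disk is already gone. A toy userspace refcount sketch of that get / early-return / put shape (disk_get and disk_put are stand-ins, not the SCSI API):

#include <stdio.h>
#include <stdlib.h>

struct disk {
    int refcount;
    int changed;
};

/* Toy analogues of scsi_disk_get()/scsi_disk_put(). */
static struct disk *disk_get(struct disk *d)
{
    if (!d)
        return NULL; /* device already torn down */
    d->refcount++;
    return d;
}

static void disk_put(struct disk *d)
{
    if (--d->refcount == 0)
        free(d);
}

static unsigned int check_events(struct disk *maybe_gone)
{
    struct disk *d = disk_get(maybe_gone);
    unsigned int events;

    if (!d)
        return 0; /* nothing to report; never touch freed memory */

    events = d->changed ? 1 : 0; /* ... do the actual check here ... */
    d->changed = 0;

    disk_put(d); /* balance the reference taken above */
    return events;
}

int main(void)
{
    struct disk *d = calloc(1, sizeof(*d));

    d->refcount = 1;
    d->changed = 1;
    printf("events: %u\n", check_events(d));
    printf("events (gone): %u\n", check_events(NULL));
    disk_put(d);
    return 0;
}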