author     Michael Chan <michael.chan@broadcom.com>   2020-10-04 15:22:56 -0400
committer  David S. Miller <davem@davemloft.net>      2020-10-04 14:41:05 -0700
commit     975bc99a4a397d1b1584a99b4adb7948b6e6944b (patch)
tree       8f44965a4d6433ed04be966edf366dac4d5306f1 /drivers
parent     fc8864e0b6ee2120d9b438f411159afe99348ff0 (diff)
bnxt_en: Refactor bnxt_free_rx_skbs().
bnxt_free_rx_skbs() frees all the allocated buffers and SKBs for
every RX ring. Refactor this function by calling a new function
bnxt_free_one_rx_ring_skbs() to free these buffers on one specified
RX ring at a time. This is preparation work for resetting one RX
ring during run-time.
Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
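The shape of the refactor is visible in the diff below: the per-ring cleanup body moves into a helper that takes a ring index, and bnxt_free_rx_skbs() becomes a thin loop over that helper, so a later per-ring reset path can free a single ring's buffers without touching the others. As a standalone illustration of that split only (the dummy_dev/dummy_ring types, field names, and printf output are invented for this sketch and are not part of the driver):

/* Illustrative only: mirrors the "free all" -> "free one, loop over all"
 * split, using made-up types rather than the bnxt structures. */
#include <stdio.h>

struct dummy_ring {
	int nr_bufs;		/* stand-in for the ring's buffers/SKBs */
};

struct dummy_dev {
	struct dummy_ring rings[4];
	int nr_rings;
};

/* Per-ring cleanup, analogous in role to bnxt_free_one_rx_ring_skbs(). */
static void free_one_ring(struct dummy_dev *dev, int ring_nr)
{
	struct dummy_ring *ring = &dev->rings[ring_nr];

	printf("freeing %d buffers on ring %d\n", ring->nr_bufs, ring_nr);
	ring->nr_bufs = 0;
}

/* The old all-rings entry point becomes a loop over the helper,
 * analogous in role to the refactored bnxt_free_rx_skbs(). */
static void free_all_rings(struct dummy_dev *dev)
{
	int i;

	for (i = 0; i < dev->nr_rings; i++)
		free_one_ring(dev, i);
}

int main(void)
{
	struct dummy_dev dev = {
		.rings = { { 8 }, { 8 }, { 8 }, { 8 } },
		.nr_rings = 4,
	};

	free_all_rings(&dev);	/* full teardown */
	free_one_ring(&dev, 2);	/* single ring, e.g. during a per-ring reset */
	return 0;
}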
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c  134
1 file changed, 66 insertions, 68 deletions
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 27fbe0cef2a9..6d7e197c875c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2540,93 +2540,91 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
 	}
 }
 
-static void bnxt_free_rx_skbs(struct bnxt *bp)
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 {
-	int i, max_idx, max_agg_idx;
+	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
 	struct pci_dev *pdev = bp->pdev;
-
-	if (!bp->rx_ring)
-		return;
+	struct bnxt_tpa_idx_map *map;
+	int i, max_idx, max_agg_idx;
 
 	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
 	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
-	for (i = 0; i < bp->rx_nr_rings; i++) {
-		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
-		struct bnxt_tpa_idx_map *map;
-		int j;
-
-		if (rxr->rx_tpa) {
-			for (j = 0; j < bp->max_tpa; j++) {
-				struct bnxt_tpa_info *tpa_info =
-							&rxr->rx_tpa[j];
-				u8 *data = tpa_info->data;
+	if (!rxr->rx_tpa)
+		goto skip_rx_tpa_free;
 
-				if (!data)
-					continue;
+	for (i = 0; i < bp->max_tpa; i++) {
+		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
+		u8 *data = tpa_info->data;
 
-				dma_unmap_single_attrs(&pdev->dev,
-						       tpa_info->mapping,
-						       bp->rx_buf_use_size,
-						       bp->rx_dir,
-						       DMA_ATTR_WEAK_ORDERING);
+		if (!data)
+			continue;
 
-				tpa_info->data = NULL;
+		dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
+				       bp->rx_buf_use_size, bp->rx_dir,
+				       DMA_ATTR_WEAK_ORDERING);
 
-				kfree(data);
-			}
-		}
+		tpa_info->data = NULL;
 
-		for (j = 0; j < max_idx; j++) {
-			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
-			dma_addr_t mapping = rx_buf->mapping;
-			void *data = rx_buf->data;
+		kfree(data);
+	}
 
-			if (!data)
-				continue;
+skip_rx_tpa_free:
+	for (i = 0; i < max_idx; i++) {
+		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
+		dma_addr_t mapping = rx_buf->mapping;
+		void *data = rx_buf->data;
 
-			rx_buf->data = NULL;
+		if (!data)
+			continue;
 
-			if (BNXT_RX_PAGE_MODE(bp)) {
-				mapping -= bp->rx_dma_offset;
-				dma_unmap_page_attrs(&pdev->dev, mapping,
-						     PAGE_SIZE, bp->rx_dir,
-						     DMA_ATTR_WEAK_ORDERING);
-				page_pool_recycle_direct(rxr->page_pool, data);
-			} else {
-				dma_unmap_single_attrs(&pdev->dev, mapping,
-						       bp->rx_buf_use_size,
-						       bp->rx_dir,
-						       DMA_ATTR_WEAK_ORDERING);
-				kfree(data);
-			}
+		rx_buf->data = NULL;
+		if (BNXT_RX_PAGE_MODE(bp)) {
+			mapping -= bp->rx_dma_offset;
+			dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
+					     bp->rx_dir,
+					     DMA_ATTR_WEAK_ORDERING);
+			page_pool_recycle_direct(rxr->page_pool, data);
+		} else {
+			dma_unmap_single_attrs(&pdev->dev, mapping,
+					       bp->rx_buf_use_size, bp->rx_dir,
+					       DMA_ATTR_WEAK_ORDERING);
+			kfree(data);
 		}
+	}
 
+	for (i = 0; i < max_agg_idx; i++) {
+		struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
+		struct page *page = rx_agg_buf->page;
 
-		for (j = 0; j < max_agg_idx; j++) {
-			struct bnxt_sw_rx_agg_bd *rx_agg_buf =
-						&rxr->rx_agg_ring[j];
-			struct page *page = rx_agg_buf->page;
-
-			if (!page)
-				continue;
+		if (!page)
+			continue;
 
-			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-					     BNXT_RX_PAGE_SIZE,
-					     PCI_DMA_FROMDEVICE,
-					     DMA_ATTR_WEAK_ORDERING);
+		dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
+				     DMA_ATTR_WEAK_ORDERING);
 
-			rx_agg_buf->page = NULL;
-			__clear_bit(j, rxr->rx_agg_bmap);
+		rx_agg_buf->page = NULL;
+		__clear_bit(i, rxr->rx_agg_bmap);
 
-			__free_page(page);
-		}
-		if (rxr->rx_page) {
-			__free_page(rxr->rx_page);
-			rxr->rx_page = NULL;
-		}
-		map = rxr->rx_tpa_idx_map;
-		if (map)
-			memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
+		__free_page(page);
+	}
+	if (rxr->rx_page) {
+		__free_page(rxr->rx_page);
+		rxr->rx_page = NULL;
 	}
+	map = rxr->rx_tpa_idx_map;
+	if (map)
+		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
+}
+
+static void bnxt_free_rx_skbs(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->rx_ring)
+		return;
+
+	for (i = 0; i < bp->rx_nr_rings; i++)
+		bnxt_free_one_rx_ring_skbs(bp, i);
 }
 
 static void bnxt_free_skbs(struct bnxt *bp)