author	stephen hemminger <shemminger@vyatta.com>	2010-12-06 12:33:01 +0000
committer	David S. Miller <davem@davemloft.net>	2010-12-10 15:03:24 -0800
commit	4afb7527ac8cc7bd8f03570e12f6eed0eca03363 (patch)
tree	d2106630e16f7495ac09f230382d85df5182da8c
parent	376d940ee91318cc6becefbb9454bb4454d7473f (diff)
sfc: convert references to LRO to GRO
This driver now uses Generic Receive Offload, not the older LRO. Change references to LRO in names and comments.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Acked-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
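For context (not part of the patch): the GRO path this driver uses is driven through the NAPI context. A driver hands each completed skb to napi_gro_receive() and can inspect the returned gro_result_t to see whether GRO held or merged the frame, which is the pattern visible in the efx_rx_packet_gro() hunk below. A minimal sketch of that pattern, where example_rx_deliver() and the merged counter are invented names for illustration only:

#include <linux/netdevice.h>

/* Illustrative sketch only: pass a received skb to the GRO engine via the
 * channel's NAPI context and count frames that GRO held or merged.
 * example_rx_deliver() and *merged are invented for this example. */
static void example_rx_deliver(struct napi_struct *napi, struct sk_buff *skb,
			       unsigned int *merged)
{
	gro_result_t result = napi_gro_receive(napi, skb);

	if (result != GRO_NORMAL && result != GRO_DROP)
		(*merged)++;	/* GRO held or merged the frame */
}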
-rw-r--r--	drivers/net/sfc/rx.c	30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 6d0959b5158..3925fd62117 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -37,7 +37,7 @@
* This driver supports two methods for allocating and using RX buffers:
* each RX buffer may be backed by an skb or by an order-n page.
*
- * When LRO is in use then the second method has a lower overhead,
+ * When GRO is in use then the second method has a lower overhead,
* since we don't have to allocate then free skbs on reassembled frames.
*
* Values:
@@ -50,25 +50,25 @@
*
* - Since pushing and popping descriptors are separated by the rx_queue
* size, so the watermarks should be ~rxd_size.
- * - The performance win by using page-based allocation for LRO is less
- * than the performance hit of using page-based allocation of non-LRO,
+ * - The performance win by using page-based allocation for GRO is less
+ * than the performance hit of using page-based allocation of non-GRO,
* so the watermarks should reflect this.
*
* Per channel we maintain a single variable, updated by each channel:
*
- * rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
+ * rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
* RX_ALLOC_FACTOR_SKB)
* Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
* limits the hysteresis), and update the allocation strategy:
*
- * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
+ * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
* RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
*/
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
-#define RX_ALLOC_LEVEL_LRO 0x2000
+#define RX_ALLOC_LEVEL_GRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
-#define RX_ALLOC_FACTOR_LRO 1
+#define RX_ALLOC_FACTOR_GRO 1
#define RX_ALLOC_FACTOR_SKB (-2)
/* This is the percentage fill level below which new RX descriptors
@@ -441,19 +441,19 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
-/* Pass a received packet up through the generic LRO stack
+/* Pass a received packet up through the generic GRO stack
*
* Handles driverlink veto, and passes the fragment up via
- * the appropriate LRO method
+ * the appropriate GRO method
*/
-static void efx_rx_packet_lro(struct efx_channel *channel,
+static void efx_rx_packet_gro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
bool checksummed)
{
struct napi_struct *napi = &channel->napi_str;
gro_result_t gro_result;
- /* Pass the skb/page into the LRO engine */
+ /* Pass the skb/page into the GRO engine */
if (rx_buf->page) {
struct efx_nic *efx = channel->efx;
struct page *page = rx_buf->page;
@@ -499,7 +499,7 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
if (gro_result == GRO_NORMAL) {
channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
} else if (gro_result != GRO_DROP) {
- channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
+ channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
channel->irq_mod_score += 2;
}
}
@@ -605,7 +605,7 @@ void __efx_rx_packet(struct efx_channel *channel,
}
if (likely(checksummed || rx_buf->page)) {
- efx_rx_packet_lro(channel, rx_buf, checksummed);
+ efx_rx_packet_gro(channel, rx_buf, checksummed);
return;
}
@@ -628,7 +628,7 @@ void efx_rx_strategy(struct efx_channel *channel)
{
enum efx_rx_alloc_method method = rx_alloc_method;
- /* Only makes sense to use page based allocation if LRO is enabled */
+ /* Only makes sense to use page based allocation if GRO is enabled */
if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
method = RX_ALLOC_METHOD_SKB;
} else if (method == RX_ALLOC_METHOD_AUTO) {
@@ -639,7 +639,7 @@ void efx_rx_strategy(struct efx_channel *channel)
channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
/* Decide on the allocation method */
- method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
+ method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
}
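The adaptive allocation strategy described in the header comment of this file can be read as a small self-contained routine: once per NAPI poll the level is constrained to 0..RX_ALLOC_LEVEL_MAX and compared against RX_ALLOC_LEVEL_GRO to choose page-based or skb-based refill. The following is an illustrative sketch of that logic using the constants visible in the diff; pick_rx_alloc_method() is an invented name, the real driver does this inline in efx_rx_strategy():

/* Illustrative sketch of the hysteresis described in the file's header
 * comment; the constants mirror the driver's macros, but this function
 * does not exist in the driver. */
#define RX_ALLOC_LEVEL_GRO	0x2000
#define RX_ALLOC_LEVEL_MAX	0x3000

enum rx_alloc_method { RX_ALLOC_METHOD_SKB, RX_ALLOC_METHOD_PAGE };

static enum rx_alloc_method pick_rx_alloc_method(int *rx_alloc_level)
{
	/* Constrain the level once per NAPI poll to limit hysteresis. */
	if (*rx_alloc_level < 0)
		*rx_alloc_level = 0;
	else if (*rx_alloc_level > RX_ALLOC_LEVEL_MAX)
		*rx_alloc_level = RX_ALLOC_LEVEL_MAX;

	/* Mostly-GRO traffic pushes the level high enough to favour pages. */
	return (*rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
		RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB;
}

Because each GRO-merged frame adds RX_ALLOC_FACTOR_GRO (+1) to the level while each skb-delivered frame adds RX_ALLOC_FACTOR_SKB (-2), sustained GRO merging is what tips the choice toward page-based buffers.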