author    David Daney <ddaney@caviumnetworks.com>   2010-05-05 13:03:09 +0000
committer David S. Miller <davem@davemloft.net>     2010-05-05 21:22:33 -0700
commit    4d30b8013b2d82138d6900965fe9fcd062f2d06d (patch)
tree      a52d03c623016cd9a7716a2641c105e7def47b3f /drivers/net/octeon/octeon_mgmt.c
parent    62538d2490d071e822d85651445c8a0bb4ed5a4b (diff)
netdev: octeon_mgmt: Fix race condition freeing TX buffers.
Under heavy load the TX cleanup tasklet and xmit threads would race and
try to free too many buffers.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
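To illustrate the pattern the patch adopts, here is a minimal user-space sketch, not the driver code itself: pthreads stand in for the kernel spinlock and the tasklet/xmit contexts, and the names (pending_done, clean_tx, worker) are hypothetical. The point is that the completion count is re-read after the lock is taken and the loop bails out if it is already zero, so two cleanup contexts can never free the same buffer twice.

/* Build (assumed file name): cc -pthread race_sketch.c -o race_sketch */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending_done = 4;   /* stands in for the hardware completion count */
static int buffers_freed;      /* how many TX buffers were actually freed      */

static void clean_tx(void)
{
        for (;;) {
                pthread_mutex_lock(&tx_lock);

                /* Re-check under the lock: another context may have
                 * already consumed the completions we saw earlier. */
                if (pending_done == 0) {
                        pthread_mutex_unlock(&tx_lock);
                        break;
                }

                pending_done--;   /* acknowledge one completion */
                buffers_freed++;  /* free exactly one buffer    */

                pthread_mutex_unlock(&tx_lock);
        }
}

static void *worker(void *arg)
{
        (void)arg;
        clean_tx();   /* both the "tasklet" and the "xmit path" call this */
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        /* With the check under the lock, exactly 4 buffers are freed. */
        printf("buffers freed: %d\n", buffers_freed);
        return 0;
}

In the driver the same role is played by re-reading CVMX_MIXX_ORCNT after taking p->tx_list.lock and breaking out of the loop when orcnt is zero, as the diff below shows.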
Diffstat (limited to 'drivers/net/octeon/octeon_mgmt.c')
-rw-r--r--  drivers/net/octeon/octeon_mgmt.c  11
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index bbbd737210f..b975a2fad95 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -189,12 +189,19 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
         mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
         while (mix_orcnt.s.orcnt) {
+                spin_lock_irqsave(&p->tx_list.lock, flags);
+
+                mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
+
+                if (mix_orcnt.s.orcnt == 0) {
+                        spin_unlock_irqrestore(&p->tx_list.lock, flags);
+                        break;
+                }
+
                 dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
                                         ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                                         DMA_BIDIRECTIONAL);
-                spin_lock_irqsave(&p->tx_list.lock, flags);
-
                 re.d64 = p->tx_ring[p->tx_next_clean];
                 p->tx_next_clean =
                         (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;