author    | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-11-14 18:51:48 -0800
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-11-14 18:51:48 -0800
commit    | 6f37ac793d6ba7b35d338f791974166f67fdd9ba (patch)
tree      | ce7e05fc4ede544021f4e395346a87f649f581f7 /net
parent    | 2f1f53bdc6531696934f6ee7bbdfa2ab4f4f62a3 (diff)
parent    | d90bf5a976793edfa88d3bb2393f0231eb8ce1e5 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
[NET]: rt_check_expire() can take a long time, add a cond_resched()
[ISDN] sc: Really, really fix warning
[ISDN] sc: Fix sndpkt to have the correct number of arguments
[TCP] FRTO: Clear frto_highmark only after process_frto that uses it
[NET]: Remove notifier block from chain when register_netdevice_notifier fails
[FS_ENET]: Fix module build.
[TCP]: Make sure write_queue_from does not begin with NULL ptr
[TCP]: Fix size calculation in sk_stream_alloc_pskb
[S2IO]: Fixed memory leak when MSI-X vector allocation fails
[BONDING]: Fix resource use after free
[SYSCTL]: Fix warning for token-ring from sysctl checker
[NET] random : secure_tcp_sequence_number should not assume CONFIG_KTIME_SCALAR
[IWLWIFI]: Not correctly dealing with hotunplug.
[TCP] FRTO: Plug potential LOST-bit leak
[TCP] FRTO: Limit snd_cwnd if TCP was application limited
[E1000]: Fix schedule while atomic when called from mii-tool.
[NETX]: Fix build failure added by 2.6.24 statistics cleanup.
[EP93xx_ETH]: Build fix after 2.6.24 NAPI changes.
[PKT_SCHED]: Check subqueue status before calling hard_start_xmit
Diffstat (limited to 'net')
-rw-r--r-- | net/core/dev.c          |  2
-rw-r--r-- | net/ipv4/route.c        |  3
-rw-r--r-- | net/ipv4/tcp_input.c    | 16
-rw-r--r-- | net/sched/sch_generic.c |  5
4 files changed, 21 insertions, 5 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index dd40b35bb00..86d62611f2f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1171,6 +1171,8 @@ rollback:
 			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
 		}
 	}
+
+	raw_notifier_chain_unregister(&netdev_chain, nb);
 	goto unlock;
 }
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 45651834e1e..1bff9ed349f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -578,6 +578,9 @@ static void rt_check_expire(struct work_struct *work)
 			i = (i + 1) & rt_hash_mask;
 		rthp = &rt_hash_table[i].chain;
 
+		if (need_resched())
+			cond_resched();
+
 		if (*rthp == NULL)
 			continue;
 		spin_lock_bh(rt_hash_lock_addr(i));
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 20c9440ab85..0f0c1c9829a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1269,6 +1269,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
 		return 0;
 
+	if (!tp->packets_out)
+		goto out;
+
 	/* SACK fastpath:
 	 * if the only SACK change is the increase of the end_seq of
 	 * the first block then only apply that SACK block
@@ -1515,6 +1518,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	    (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
 		tcp_update_reordering(sk, tp->fackets_out - reord, 0);
 
+out:
+
 #if FASTRETRANS_DEBUG > 0
 	BUG_TRAP((int)tp->sacked_out >= 0);
 	BUG_TRAP((int)tp->lost_out >= 0);
@@ -1669,6 +1674,9 @@ void tcp_enter_frto(struct sock *sk)
 	}
 	tcp_verify_left_out(tp);
 
+	/* Too bad if TCP was application limited */
+	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
+
 	/* Earlier loss recovery underway (see RFC4138; Appendix B).
 	 * The last condition is necessary at least in tp->frto_counter case.
 	 */
@@ -1701,6 +1709,8 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 	tcp_for_write_queue(skb, sk) {
 		if (skb == tcp_send_head(sk))
 			break;
+
+		TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
 		/*
 		 * Count the retransmission made on RTO correctly (only when
 		 * waiting for the first ACK and did not get it)...
@@ -1714,7 +1724,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 		} else {
 			if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
 				tp->undo_marker = 0;
-			TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
+			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 		}
 
 		/* Don't lost mark skbs that were fwd transmitted after RTO */
@@ -3103,11 +3113,11 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	/* See if we can take anything off of the retransmit queue. */
 	flag |= tcp_clean_rtx_queue(sk, &seq_rtt, prior_fackets);
 
+	if (tp->frto_counter)
+		frto_cwnd = tcp_process_frto(sk, flag);
 	/* Guarantee sacktag reordering detection against wrap-arounds */
 	if (before(tp->frto_highmark, tp->snd_una))
 		tp->frto_highmark = 0;
-	if (tp->frto_counter)
-		frto_cwnd = tcp_process_frto(sk, flag);
 
 	if (tcp_ack_is_dubious(sk, flag)) {
 		/* Advance CWND, if state allows this. */
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index fa1a6f45dc4..e595e6570ce 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -134,7 +134,7 @@ static inline int qdisc_restart(struct net_device *dev)
 {
 	struct Qdisc *q = dev->qdisc;
 	struct sk_buff *skb;
-	int ret;
+	int ret = NETDEV_TX_BUSY;
 
 	/* Dequeue packet */
 	if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
@@ -145,7 +145,8 @@ static inline int qdisc_restart(struct net_device *dev)
 	spin_unlock(&dev->queue_lock);
 
 	HARD_TX_LOCK(dev, smp_processor_id());
-	ret = dev_hard_start_xmit(skb, dev);
+	if (!netif_subqueue_stopped(dev, skb))
+		ret = dev_hard_start_xmit(skb, dev);
 	HARD_TX_UNLOCK(dev);
 
 	spin_lock(&dev->queue_lock);
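
The rt_check_expire() hunk above applies a common kernel pattern: a long table walk running in process context yields the CPU between buckets instead of monopolizing it for the whole scan, and it only reschedules while no bucket lock is held. A minimal sketch of that pattern follows; the example_* names are hypothetical and stand in for the routing-cache code rather than reproducing it.

/* Sketch of the cond_resched() pattern from the rt_check_expire() fix.
 * example_bucket, example_table and example_scan are illustrative names,
 * not kernel symbols. */
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#define EXAMPLE_BUCKETS 4096

static struct example_bucket {
	spinlock_t lock;
	struct list_head chain;
} example_table[EXAMPLE_BUCKETS];

static void example_scan(void)
{
	unsigned int i;

	for (i = 0; i < EXAMPLE_BUCKETS; i++) {
		/* Yield between buckets so a long scan cannot hog the CPU;
		 * never reschedule while the bucket lock is held. */
		if (need_resched())
			cond_resched();

		if (list_empty(&example_table[i].chain))
			continue;

		spin_lock_bh(&example_table[i].lock);
		/* ... drop expired entries from this bucket ... */
		spin_unlock_bh(&example_table[i].lock);
	}
}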