author    Linus Torvalds <torvalds@linux-foundation.org>    2016-12-12 07:54:15 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-12-12 07:54:15 -0800
commit    ce38aa9cbed3d109355b0169b520362c409c0541 (patch)
tree      621511c34edd22ac30ca12f78f0d478245b4ccd7 /net/ipv4/tcp_input.c
parent    69973b830859bc6529a7a0468ba0d80ee5117826 (diff)
parent    d84701ecbcd6ad63faa7a9c18ad670d1c4d561c0 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Platform regulatory domain support for ath10k, from Bartosz Markowski.
 2) Centralize min/max MTU checking, thus removing tons of duplicated code from the various drivers. From Jarod Wilson.
 3) Support ingress actions in act_mirred, from Shmulik Ladkani.
 4) Improve device adjacency tracking, from David Ahern.
 5) Add support for LED triggers on PHY link state changes, from Zach Brown.
 6) Improve UDP socket memory accounting, from Paolo Abeni.
 7) Set SK_MEM_QUANTUM to a fixed size of 4096, instead of PAGE_SIZE. From Eric Dumazet.
 8) Collapse TCP SKBs at retransmit time even if the right side SKB has frags. Also from Eric Dumazet.
 9) Add IP_RECVFRAGSIZE and IPV6_RECVFRAGSIZE cmsgs, from Willem de Bruijn.
10) Support routing by UID, from Lorenzo Colitti.
11) Handle L3 domain binding (i.e. VRF) for RAW sockets, from David Ahern.
12) tcp_get_info() can run lockless, from Eric Dumazet.
13) 4-tuple UDP hashing in SFC driver, from Edward Cree.
14) Avoid reorders in GRO code, from Eric Dumazet.
15) IPv6 Segment Routing support, from David Lebrun.
16) Support MPLS push and pop for L3 packets in openvswitch, from Jiri Benc.
17) Add LRU data structure support for BPF, from Martin KaFai Lau.
18) VF support in liquidio driver, from Raghu Vatsavayi.
19) Multiqueue support in alx driver, from Tobias Regnery.
20) Networking cgroup BPF support, from Daniel Mack.
21) TCP chronograph measurements, from Francis Yan.
22) XDP support for qed driver, from Yuval Mintz.
23) BPF based lwtunnels, from Thomas Graf.
24) Consistent FIB dumping to offloading drivers, from Ido Schimmel.
25) Many optimizations for UDP under high load, from Eric Dumazet.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1522 commits)
  netfilter: nft_counter: rework atomic dump and reset
  e1000: use disable_hardirq() for e1000_netpoll()
  i40e: don't truncate match_method assignment
  net: ethernet: ti: netcp: add support of cpts
  net: phy: phy drivers should not set SUPPORTED_[Asym_]Pause
  net: l2tp: ppp: change PPPOL2TP_MSG_* => L2TP_MSG_*
  net: l2tp: deprecate PPPOL2TP_MSG_* in favour of L2TP_MSG_*
  net: l2tp: export debug flags to UAPI
  net: ethernet: stmmac: remove private tx queue lock
  net: ethernet: sxgbe: remove private tx queue lock
  net: bridge: shorten ageing time on topology change
  net: bridge: add helper to set topology change
  net: bridge: add helper to offload ageing time
  net: nicvf: use new api ethtool_{get|set}_link_ksettings
  net: ethernet: ti: cpsw: sync rates for channels in dual emac mode
  net: ethernet: ti: cpsw: re-split res only when speed is changed
  net: ethernet: ti: cpsw: combine budget and weight split and check
  net: ethernet: ti: cpsw: don't start queue twice
  net: ethernet: ti: cpsw: use same macros to get active slave
  net: mvneta: select GENERIC_ALLOCATOR
  ...
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c  33
1 file changed, 16 insertions, 17 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c71d49ce0c93..6c790754ae3e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -85,6 +85,7 @@ int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
int sysctl_tcp_adv_win_scale __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
+EXPORT_SYMBOL(sysctl_tcp_timestamps);
/* rfc5961 challenge ack rate limiting */
int sysctl_tcp_challenge_ack_limit = 1000;
@@ -2414,10 +2415,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
if (tp->prior_ssthresh) {
const struct inet_connection_sock *icsk = inet_csk(sk);
- if (icsk->icsk_ca_ops->undo_cwnd)
- tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
- else
- tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);
+ tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
if (tp->prior_ssthresh > tp->snd_ssthresh) {
tp->snd_ssthresh = tp->prior_ssthresh;
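The hunk above drops the fallback for congestion control modules that lack an undo_cwnd handler: every module is now expected to provide the callback, so tcp_undo_cwnd_reduction() can invoke it unconditionally. As a hedged sketch only (the helper name and placement are illustrative, not the exact code added elsewhere in this merge), a Reno-style callback preserving the old fallback behaviour would look roughly like:

static u32 example_reno_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* Restore the window lost to a reduction that turned out to be
	 * spurious; never report less than twice the slow-start threshold,
	 * matching the fallback removed above.
	 */
	return max(tp->snd_cwnd, tp->snd_ssthresh << 1);
}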
@@ -3201,6 +3199,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
tp->lost_skb_hint = NULL;
}
+ if (!skb)
+ tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
+
if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una)))
tp->snd_up = tp->snd_una;
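This change belongs to the TCP chronograph work mentioned in the pull summary (item 21): once tcp_clean_rtx_queue() has acked everything and the retransmit queue is empty (skb is NULL), the connection stops being counted as busy. A minimal sketch of the per-state time accounting such a stop implies, with purely illustrative names (the real fields live in struct tcp_sock and differ in detail):

enum example_chrono {
	EX_CHRONO_UNSPEC,
	EX_CHRONO_BUSY,            /* data outstanding in the write queue */
	EX_CHRONO_RWND_LIMITED,    /* stalled by the peer's receive window */
	EX_CHRONO_SNDBUF_LIMITED,  /* stalled by the local send buffer */
	__EX_CHRONO_MAX,
};

struct example_chrono_stats {
	u32 start;                    /* timestamp when the current state began */
	u32 stat[__EX_CHRONO_MAX];    /* accumulated time per state */
	enum example_chrono type;     /* state currently being timed */
};

static void example_chrono_stop(struct example_chrono_stats *c, u32 now)
{
	if (c->type == EX_CHRONO_UNSPEC)
		return;
	c->stat[c->type] += now - c->start;
	c->type = EX_CHRONO_UNSPEC;
}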
@@ -3371,9 +3372,7 @@ static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
u32 delta = ack - tp->snd_una;
sock_owned_by_me((struct sock *)tp);
- u64_stats_update_begin_raw(&tp->syncp);
tp->bytes_acked += delta;
- u64_stats_update_end_raw(&tp->syncp);
tp->snd_una = ack;
}
@@ -3383,9 +3382,7 @@ static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
u32 delta = seq - tp->rcv_nxt;
sock_owned_by_me((struct sock *)tp);
- u64_stats_update_begin_raw(&tp->syncp);
tp->bytes_received += delta;
- u64_stats_update_end_raw(&tp->syncp);
tp->rcv_nxt = seq;
}
@@ -5083,8 +5080,11 @@ static void tcp_check_space(struct sock *sk)
/* pairs with tcp_poll() */
smp_mb__after_atomic();
if (sk->sk_socket &&
- test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
+ test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
tcp_new_space(sk);
+ if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
+ tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
+ }
}
}
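Here the sndbuf-limited chronograph period is ended as soon as tcp_new_space() has woken the writer and SOCK_NOSPACE has been cleared again. The matching start is assumed to sit on the send path elsewhere in this merge; the sketch below only illustrates that pairing, and tcp_chrono_start() plus its exact placement are assumptions, not code shown in this diff:

static void example_mark_sndbuf_limited(struct sock *sk)
{
	/* When the sender runs out of send buffer space, flag the socket
	 * and start timing the sndbuf-limited period; the hunk above stops
	 * that timer once space opens up again.
	 */
	if (sk->sk_socket && !sk_stream_memory_free(sk)) {
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED); /* assumed counterpart */
	}
}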
@@ -6318,13 +6318,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
goto drop;
}
-
- /* Accept backlog is full. If we have already queued enough
- * of warm entries in syn queue, drop request. It is better than
- * clogging syn queue with openreqs with exponentially increasing
- * timeout.
- */
- if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+ if (sk_acceptq_is_full(sk)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
goto drop;
}
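The dropped comment and condition used to tolerate a full accept queue as long as the SYN queue held at most one "young" (not yet retransmitted) request; now a full accept queue always drops the incoming SYN. For reference, a hedged sketch of what the fullness test amounts to (field names as in include/net/sock.h of this era; shown for context only):

static inline bool example_acceptq_is_full(const struct sock *sk)
{
	/* More established-but-unaccepted children than the listen() backlog. */
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}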
@@ -6334,6 +6328,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
goto drop;
tcp_rsk(req)->af_specific = af_ops;
+ tcp_rsk(req)->ts_off = 0;
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = af_ops->mss_clamp;
@@ -6355,6 +6350,9 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
if (security_inet_conn_request(sk, skb, req))
goto drop_and_free;
+ if (isn && tmp_opt.tstamp_ok)
+ af_ops->init_seq(skb, &tcp_rsk(req)->ts_off);
+
if (!want_cookie && !isn) {
/* VJ's idea. We save last timestamp seen
* from the destination in peer table, when entering
@@ -6395,7 +6393,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
goto drop_and_release;
}
- isn = af_ops->init_seq(skb);
+ isn = af_ops->init_seq(skb, &tcp_rsk(req)->ts_off);
}
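Together with the ts_off initialisation above, this gives af_ops->init_seq() a second output: besides the initial sequence number, it now derives a per-connection timestamp offset so TCP timestamps no longer expose a global clock. The derivation itself is not in this file; the sketch below shows the general idea of hashing the 4-tuple with a boot-time key, and the helper name, the jhash choice, and the exact inputs are assumptions rather than the in-tree secure_tcp_* code:

#include <linux/jhash.h>

static u32 example_tsoff(__be32 saddr, __be32 daddr,
			 __be16 sport, __be16 dport, u32 boot_key)
{
	/* The same 4-tuple and key always yield the same offset, so
	 * retransmitted SYN-ACKs for one flow agree, while unrelated
	 * connections get unrelated timestamp bases.
	 */
	return jhash_3words((__force u32)saddr, (__force u32)daddr,
			    ((u32)(__force u16)sport << 16) |
			    (__force u16)dport,
			    boot_key);
}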
if (!dst) {
dst = af_ops->route_req(sk, &fl, req, NULL);
@@ -6407,6 +6405,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
if (want_cookie) {
isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
+ tcp_rsk(req)->ts_off = 0;
req->cookie_ts = tmp_opt.tstamp_ok;
if (!tmp_opt.tstamp_ok)
inet_rsk(req)->ecn_ok = 0;