path: root/net/dccp/ccids/ccid2.c
author     Gerrit Renker <gerrit@erg.abdn.ac.uk>  2008-09-04 07:30:19 +0200
committer  Gerrit Renker <gerrit@erg.abdn.ac.uk>  2008-09-04 07:45:39 +0200
commit     1435562d7e0412e4885b661843f69859013f9d25 (patch)
tree       8357027ac15fa199051e8d85aa448115c3bdd2c2  /net/dccp/ccids/ccid2.c
parent     e9803c0104564698d3b8e84ccdb0b8b0e65427e2 (diff)
dccp ccid-2: Replace broken RTT estimator with better algorithm
The current CCID-2 RTT estimator code is in parts broken and lags behind the
suggestions in RFC 2988 of using scaled variants for SRTT/RTTVAR. That code is
replaced by the present patch, which reuses the Linux TCP RTT estimator code -
reasons for this code duplication are given below.

Further details:
----------------
1. The minimum RTO of previously one second has been replaced with TCP's, since
   RFC 4341, sec. 5 says that the minimum of 1 sec. (suggested in RFC 2988, 2.4)
   is not necessary. Instead, TCP_RTO_MIN is used, which agrees with DCCP's
   concept of a default RTT (RFC 4340, 3.4).
2. The maximum RTO has been set to DCCP_RTO_MAX (64 sec), which agrees with
   RFC 2988, 2.5.
3. De-inlined the function ccid2_new_ack().
4. Added a FIXME: the RTT is sampled several times per Ack Vector, which will
   give the wrong estimate. It should be replaced with one sample per Ack.
   However, at the moment this cannot be resolved easily, since
   - it depends on the TX history code (which also needs some work),
   - the cleanest solution is not to use the `sent' time at all (saves 4 bytes
     per entry) and to use DCCP timestamps / elapsed time to estimate the RTT,
     which however is non-trivial to get right (but needs to be done).

Reasons for reusing the Linux TCP estimator algorithm:
------------------------------------------------------
Some time was spent to find a better alternative, using basic RFC 2988 as a
first step. Further analysis and experimentation showed that the Linux TCP RTO
estimator is superior to a basic RFC 2988 implementation. A summary is on
http://www.erg.abdn.ac.uk/users/gerrit/dccp/notes/ccid2/rto_estimator/

In addition, this estimator fared well in a recent empirical evaluation:

  Rewaskar, Sushant, Jasleen Kaur and F. Donelson Smith. A Performance Study of
  Loss Detection/Recovery in Real-world TCP Implementations. Proceedings of the
  15th IEEE International Conference on Network Protocols (ICNP-07), 2007.

Thus there is significant benefit in reusing the existing TCP code.

Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
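[Editorial aside] For readers unfamiliar with the scaled estimator adopted here,
below is a minimal user-space sketch of the update rules (SRTT kept scaled by 8,
mdev by 4, RTO derived from both). It is illustrative only, not the kernel code:
units are milliseconds instead of jiffies, SKETCH_RTO_MIN/SKETCH_RTO_MAX are
stand-ins for TCP_RTO_MIN/DCCP_RTO_MAX, and the mdev_max bookkeeping, the
per-flight RTTVAR decay and the negative-error neutralisation of the real
implementation are omitted for brevity.

    /* Sketch of a scaled RFC 2988-style RTT/RTO estimator (not kernel code). */
    #include <stdio.h>

    #define SKETCH_RTO_MIN   200        /* stand-in for TCP_RTO_MIN (~200 ms) */
    #define SKETCH_RTO_MAX 64000        /* stand-in for DCCP_RTO_MAX (64 s)   */

    struct rtt_est {
            long srtt;                  /* smoothed RTT, stored scaled by 8   */
            long mdev;                  /* mean deviation, stored scaled by 4 */
            long rttvar;                /* RTTVAR bound used for the RTO      */
            long rto;                   /* resulting timeout in milliseconds  */
    };

    /* Feed one RTT sample m (milliseconds) into the estimator. */
    static void rtt_sample(struct rtt_est *e, long m)
    {
            if (m <= 0)
                    m = 1;

            if (e->srtt == 0) {                 /* first measurement */
                    e->srtt = m << 3;           /* SRTT   = m        */
                    e->mdev = m << 1;           /* RTTVAR = m / 2    */
            } else {
                    m -= (e->srtt >> 3);        /* m is now the error term    */
                    e->srtt += m;               /* SRTT += (m - SRTT) / 8     */
                    if (m < 0)
                            m = -m;
                    m -= (e->mdev >> 2);
                    e->mdev += m;               /* mdev += (|err| - mdev) / 4 */
            }
            if (e->mdev > e->rttvar)
                    e->rttvar = e->mdev;

            /* In this scaling rttvar already holds 4*RTTVAR, so this is
             * RTO = SRTT + 4*RTTVAR (RFC 2988), clamped to the sketch bounds. */
            e->rto = (e->srtt >> 3) + e->rttvar;
            if (e->rto < SKETCH_RTO_MIN)
                    e->rto = SKETCH_RTO_MIN;
            if (e->rto > SKETCH_RTO_MAX)
                    e->rto = SKETCH_RTO_MAX;
    }

    int main(void)
    {
            struct rtt_est e = { 0, 0, 0, 0 };
            long samples[] = { 100, 120, 90, 300, 110 };
            size_t i;

            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                    rtt_sample(&e, samples[i]);
                    printf("sample=%ldms srtt=%ldms rto=%ldms\n",
                           samples[i], e.srtt >> 3, e.rto);
            }
            return 0;
    }

Running the sketch on the sample sequence above shows the intended behaviour:
the RTO starts near 3x the first sample and widens only when the measured RTT
deviates strongly (e.g. the 300 ms outlier), rather than collapsing towards SRTT.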
Diffstat (limited to 'net/dccp/ccids/ccid2.c')
-rw-r--r--  net/dccp/ccids/ccid2.c | 171
1 file changed, 94 insertions(+), 77 deletions(-)
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index fa074d44206..22753fd9869 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -110,12 +110,6 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
dp->dccps_l_ack_ratio = val;
}
-static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hctx, long val)
-{
- ccid2_pr_debug("change SRTT to %ld\n", val);
- hctx->srtt = val;
-}
-
static void ccid2_start_rto_timer(struct sock *sk);
static void ccid2_hc_tx_rto_expire(unsigned long data)
@@ -123,7 +117,6 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
struct sock *sk = (struct sock *)data;
struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
const bool sender_was_blocked = ccid2_cwnd_network_limited(hctx);
- long s;
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
@@ -135,10 +128,8 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
/* back-off timer */
hctx->rto <<= 1;
-
- s = hctx->rto / HZ;
- if (s > 60)
- hctx->rto = 60 * HZ;
+ if (hctx->rto > DCCP_RTO_MAX)
+ hctx->rto = DCCP_RTO_MAX;
/* adjust pipe, cwnd etc */
hctx->ssthresh = hctx->cwnd / 2;
@@ -279,9 +270,87 @@ static void ccid2_hc_tx_kill_rto_timer(struct sock *sk)
ccid2_pr_debug("deleted RTO timer\n");
}
-static inline void ccid2_new_ack(struct sock *sk,
- struct ccid2_seq *seqp,
- unsigned int *maxincr)
+/**
+ * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm
+ * This code is almost identical with TCP's tcp_rtt_estimator(), since
+ * - it has a higher sampling frequency (recommended by RFC 1323),
+ * - the RTO does not collapse into RTT due to RTTVAR going towards zero,
+ * - it is simple (cf. more complex proposals such as Eifel timer or research
+ * which suggests that the gain should be set according to window size),
+ * - in tests it was found to work well with CCID2 [gerrit].
+ */
+static void ccid2_rtt_estimator(struct sock *sk, const long mrtt)
+{
+ struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
+ long m = mrtt ? : 1;
+
+ if (hctx->srtt == 0) {
+ /* First measurement m */
+ hctx->srtt = m << 3;
+ hctx->mdev = m << 1;
+
+ hctx->mdev_max = max(TCP_RTO_MIN, hctx->mdev);
+ hctx->rttvar = hctx->mdev_max;
+ hctx->rtt_seq = dccp_sk(sk)->dccps_gss;
+ } else {
+ /* Update scaled SRTT as SRTT += 1/8 * (m - SRTT) */
+ m -= (hctx->srtt >> 3);
+ hctx->srtt += m;
+
+ /* Similarly, update scaled mdev with regard to |m| */
+ if (m < 0) {
+ m = -m;
+ m -= (hctx->mdev >> 2);
+ /*
+ * This neutralises RTO increase when RTT < SRTT - mdev
+ * (see P. Sarolahti, A. Kuznetsov,"Congestion Control
+ * in Linux TCP", USENIX 2002, pp. 49-62).
+ */
+ if (m > 0)
+ m >>= 3;
+ } else {
+ m -= (hctx->mdev >> 2);
+ }
+ hctx->mdev += m;
+
+ if (hctx->mdev > hctx->mdev_max) {
+ hctx->mdev_max = hctx->mdev;
+ if (hctx->mdev_max > hctx->rttvar)
+ hctx->rttvar = hctx->mdev_max;
+ }
+
+ /*
+ * Decay RTTVAR at most once per flight, exploiting that
+ * 1) pipe <= cwnd <= Sequence_Window = W (RFC 4340, 7.5.2)
+ * 2) AWL = GSS-W+1 <= GAR <= GSS (RFC 4340, 7.5.1)
+ * GAR is a useful bound for FlightSize = pipe, AWL is probably
+ * too low as it over-estimates pipe.
+ */
+ if (after48(dccp_sk(sk)->dccps_gar, hctx->rtt_seq)) {
+ if (hctx->mdev_max < hctx->rttvar)
+ hctx->rttvar -= (hctx->rttvar -
+ hctx->mdev_max) >> 2;
+ hctx->rtt_seq = dccp_sk(sk)->dccps_gss;
+ hctx->mdev_max = TCP_RTO_MIN;
+ }
+ }
+
+ /*
+ * Set RTO from SRTT and RTTVAR
+ * Clock granularity is ignored since the minimum error for RTTVAR is
+ * clamped to 50msec (corresponding to HZ=20). This leads to a minimum
+ * RTO of 200msec. This agrees with TCP and RFC 4341, 5.: "Because DCCP
+ * does not retransmit data, DCCP does not require TCP's recommended
+ * minimum timeout of one second".
+ */
+ hctx->rto = (hctx->srtt >> 3) + hctx->rttvar;
+
+ if (hctx->rto > DCCP_RTO_MAX)
+ hctx->rto = DCCP_RTO_MAX;
+}
+
+static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
+ unsigned int *maxincr)
{
struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
@@ -295,64 +364,15 @@ static inline void ccid2_new_ack(struct sock *sk,
hctx->cwnd += 1;
hctx->packets_acked = 0;
}
-
- /* update RTO */
- if (hctx->srtt == -1 ||
- time_after(jiffies, hctx->lastrtt + hctx->srtt)) {
- unsigned long r = (long)jiffies - (long)seqp->ccid2s_sent;
- int s;
-
- /* first measurement */
- if (hctx->srtt == -1) {
- ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n",
- r, jiffies,
- (unsigned long long)seqp->ccid2s_seq);
- ccid2_change_srtt(hctx, r);
- hctx->rttvar = r >> 1;
- } else {
- /* RTTVAR */
- long tmp = hctx->srtt - r;
- long srtt;
-
- if (tmp < 0)
- tmp *= -1;
-
- tmp >>= 2;
- hctx->rttvar *= 3;
- hctx->rttvar >>= 2;
- hctx->rttvar += tmp;
-
- /* SRTT */
- srtt = hctx->srtt;
- srtt *= 7;
- srtt >>= 3;
- tmp = r >> 3;
- srtt += tmp;
- ccid2_change_srtt(hctx, srtt);
- }
- s = hctx->rttvar << 2;
- /* clock granularity is 1 when based on jiffies */
- if (!s)
- s = 1;
- hctx->rto = hctx->srtt + s;
-
- /* must be at least a second */
- s = hctx->rto / HZ;
- /* DCCP doesn't require this [but I like it cuz my code sux] */
-#if 1
- if (s < 1)
- hctx->rto = HZ;
-#endif
- /* max 60 seconds */
- if (s > 60)
- hctx->rto = HZ * 60;
-
- hctx->lastrtt = jiffies;
-
- ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n",
- hctx->srtt, hctx->rttvar,
- hctx->rto, HZ, r);
- }
+ /*
+ * FIXME: RTT is sampled several times per acknowledgment (for each
+ * entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
+ * This causes the RTT to be over-estimated, since the older entries
+ * in the Ack Vector have earlier sending times.
+ * The cleanest solution is to not use the ccid2s_sent field at all
+ * and instead use DCCP timestamps - need to be resolved at some time.
+ */
+ ccid2_rtt_estimator(sk, jiffies - seqp->ccid2s_sent);
}
static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
@@ -579,8 +599,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
if (hctx->pipe == 0)
sk_stop_timer(sk, &hctx->rtotimer);
else
- sk_reset_timer(sk, &hctx->rtotimer,
- jiffies + hctx->rto);
+ sk_reset_timer(sk, &hctx->rtotimer, jiffies + hctx->rto);
done:
/* check if incoming Acks allow pending packets to be sent */
if (sender_was_blocked && !ccid2_cwnd_network_limited(hctx))
@@ -613,9 +632,7 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
if (ccid2_hc_tx_alloc_seq(hctx))
return -ENOMEM;
- hctx->rto = 3 * HZ;
- ccid2_change_srtt(hctx, -1);
- hctx->rttvar = -1;
+ hctx->rto = DCCP_TIMEOUT_INIT;
hctx->rpdupack = -1;
hctx->last_cong = jiffies;
setup_timer(&hctx->rtotimer, ccid2_hc_tx_rto_expire, (unsigned long)sk);