Revert "kernel 4.14/4.19: tcp_bbr: Improving TCP BBR performance for WiFi and cellular networks"
This reverts commit b0f06182c6.
parent b0f06182c6
commit e4f0bd20dd
@@ -1,414 +0,0 @@
From 232aa8ec3ed979d4716891540c03a806ecab0c37 Mon Sep 17 00:00:00 2001
From: Priyaranjan Jha <priyarjha@google.com>
Date: Wed, 23 Jan 2019 12:04:53 -0800
Subject: tcp_bbr: refactor bbr_target_cwnd() for general inflight provisioning

Because bbr_target_cwnd() is really a general-purpose BBR helper for
computing some volume of inflight data as a function of the estimated
BDP, refactor it into the following helper functions (a standalone
sketch of the fixed-point bdp math follows the list):
- bbr_bdp()
- bbr_quantization_budget()
- bbr_inflight()

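As a concrete illustration of the fixed-point math behind bbr_bdp(), here is
a minimal userspace sketch. The BW_SCALE/BBR_SCALE constants mirror the ones
in net/ipv4/tcp_bbr.c; the traffic numbers (12,500 packets/sec, 20 ms min
RTT) are hypothetical, chosen only to make the unit conversions visible:

#include <stdint.h>
#include <stdio.h>

#define BW_SCALE 24
#define BW_UNIT (1ULL << BW_SCALE)  /* bw is in packets/usec << BW_SCALE */
#define BBR_SCALE 8
#define BBR_UNIT (1 << BBR_SCALE)   /* gain is fixed point: 256 == 1.0 */

static uint32_t bdp(uint32_t bw, uint32_t min_rtt_us, int gain)
{
	uint64_t w = (uint64_t)bw * min_rtt_us;

	/* Apply the gain, drop the BW_SCALE shift, round up to a packet. */
	return (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
}

int main(void)
{
	/* Hypothetical path: 12500 pkts/sec over a 20 ms min RTT. */
	uint32_t bw = 12500ULL * BW_UNIT / 1000000; /* pkts/usec << BW_SCALE */

	printf("bdp = %u packets\n", bdp(bw, 20000, BBR_UNIT));
	return 0;
}

Compiled standalone this prints "bdp = 250 packets", i.e. bw * min_rtt at
gain 1.0, rounded up to whole packets.
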
Signed-off-by: Priyaranjan Jha <priyarjha@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
net/ipv4/tcp_bbr.c | 60 +++++++++++++++++++++++++++++++++++-------------------
1 file changed, 39 insertions(+), 21 deletions(-)

diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 0f497fc..6b6c7f14 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -307,30 +307,19 @@
}
}

-/* Find target cwnd. Right-size the cwnd based on min RTT and the
- * estimated bottleneck bandwidth:
+/* Calculate bdp based on min RTT and the estimated bottleneck bandwidth:
*
- * cwnd = bw * min_rtt * gain = BDP * gain
+ * bdp = bw * min_rtt * gain
*
* The key factor, gain, controls the amount of queue. While a small gain
* builds a smaller queue, it becomes more vulnerable to noise in RTT
* measurements (e.g., delayed ACKs or other ACK compression effects). This
* noise may cause BBR to under-estimate the rate.
- *
- * To achieve full performance in high-speed paths, we budget enough cwnd to
- * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
- * - one skb in sending host Qdisc,
- * - one skb in sending host TSO/GSO engine
- * - one skb being received by receiver host LRO/GRO/delayed-ACK engine
- * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
- * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
- * which allows 2 outstanding 2-packet sequences, to try to keep pipe
- * full even with ACK-every-other-packet delayed ACKs.
*/
-static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
+static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
{
struct bbr *bbr = inet_csk_ca(sk);
- u32 cwnd;
+ u32 bdp;
u64 w;

/* If we've never had a valid RTT sample, cap cwnd at the initial
@@ -345,8 +334,25 @@
w = (u64)bw * bbr->min_rtt_us;

/* Apply a gain to the given value, then remove the BW_SCALE shift. */
- cwnd = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
+ bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
+
+ return bdp;
+}

+/* To achieve full performance in high-speed paths, we budget enough cwnd to
+ * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
+ * - one skb in sending host Qdisc,
+ * - one skb in sending host TSO/GSO engine
+ * - one skb being received by receiver host LRO/GRO/delayed-ACK engine
+ * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
+ * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
+ * which allows 2 outstanding 2-packet sequences, to try to keep pipe
+ * full even with ACK-every-other-packet delayed ACKs.
+ */
+static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd, int gain)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
/* Allow enough full-sized skbs in flight to utilize end systems. */
cwnd += 3 * bbr->tso_segs_goal;

@@ -360,6 +366,17 @@
return cwnd;
}

+/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
+static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
+{
+ u32 inflight;
+
+ inflight = bbr_bdp(sk, bw, gain);
+ inflight = bbr_quantization_budget(sk, inflight, gain);
+
+ return inflight;
+}
+
/* An optimization in BBR to reduce losses: On the first round of recovery, we
* follow the packet conservation principle: send P packets per P packets acked.
* After that, we slow-start and send at most 2*P packets per P packets acked.
@@ -427,7 +444,8 @@
goto done;

/* If we're below target cwnd, slow start cwnd toward target cwnd. */
- target_cwnd = bbr_target_cwnd(sk, bw, gain);
+ target_cwnd = bbr_bdp(sk, bw, gain);
+ target_cwnd = bbr_quantization_budget(sk, target_cwnd, gain);
if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */
cwnd = min(cwnd + acked, target_cwnd);
else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
@@ -468,14 +486,14 @@
if (bbr->pacing_gain > BBR_UNIT)
return is_full_length &&
(rs->losses || /* perhaps pacing_gain*BDP won't fit */
- inflight >= bbr_target_cwnd(sk, bw, bbr->pacing_gain));
+ inflight >= bbr_inflight(sk, bw, bbr->pacing_gain));

/* A pacing_gain < 1.0 tries to drain extra queue we added if bw
* probing didn't find more bw. If inflight falls to match BDP then we
* estimate queue is drained; persisting would underutilize the pipe.
*/
return is_full_length ||
- inflight <= bbr_target_cwnd(sk, bw, BBR_UNIT);
+ inflight <= bbr_inflight(sk, bw, BBR_UNIT);
}

static void bbr_advance_cycle_phase(struct sock *sk)
@@ -736,7 +754,7 @@
} /* fall through to check if in-flight is already small: */
if (bbr->mode == BBR_DRAIN &&
tcp_packets_in_flight(tcp_sk(sk)) <=
- bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT))
+ bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */
}


--
cgit v1.1

From 78dc70ebaa38aa303274e333be6c98eef87619e2 Mon Sep 17 00:00:00 2001
From: Priyaranjan Jha <priyarjha@google.com>
Date: Wed, 23 Jan 2019 12:04:54 -0800
Subject: tcp_bbr: adapt cwnd based on ack aggregation estimation

Aggregation effects are extremely common with wifi, cellular, and cable
modem link technologies, ACK decimation in middleboxes, and LRO and GRO
in receiving hosts. The aggregation can happen in either direction,
data or ACKs, but in either case the aggregation effect is visible
to the sender in the ACK stream.

Previously BBR's sending was often limited by cwnd under severe ACK
aggregation/decimation because BBR sized the cwnd at 2*BDP. If packets
were acked in bursts after long delays (e.g. one ACK acking 5*BDP after
5*RTT), BBR's sending was halted after sending 2*BDP over 2*RTT, leaving
the bottleneck idle for potentially long periods. Note that loss-based
congestion control does not have this issue because when facing
aggregation it continues increasing cwnd after bursts of ACKs, growing
cwnd until the buffer is full.

To achieve good throughput in the presence of aggregation effects, this
algorithm allows the BBR sender to put extra data in flight to keep the
bottleneck utilized during silences in the ACK stream that it has evidence
to suggest were caused by aggregation.

A summary of the algorithm: when a burst of packets is acked by a
stretched ACK or a burst of ACKs or both, BBR first estimates the expected
amount of data that should have been acked, based on its estimated
bandwidth. Then the surplus ("extra_acked") is recorded in a windowed-max
filter to estimate the recent level of observed ACK aggregation. Then cwnd
is increased by the ACK aggregation estimate. The larger cwnd avoids BBR
being cwnd-limited in the face of ACK silences that recent history suggests
were caused by aggregation. As a sanity check, the ACK aggregation degree
is upper-bounded by the cwnd (at the time of measurement) and a global max
of BW * 100ms. The algorithm is further described by the following
presentation:
https://datatracker.ietf.org/meeting/101/materials/slides-101-iccrg-an-update-on-bbr-work-at-google-00

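To make that summary concrete, here is a simplified single-flow sketch of
the estimator in userspace C. It uses floating-point bandwidth and
illustrative names, not the kernel's code:

#include <stdint.h>

struct aggr_est {
	uint64_t epoch_start_us;   /* start of ACK sampling epoch */
	uint32_t epoch_acked;      /* packets ACKed since epoch start */
	uint16_t extra_acked[2];   /* windowed-max filter buckets */
	uint8_t  win_idx;          /* bucket currently being filled */
};

/* Process one ACK; returns the current aggregation estimate (packets). */
static uint32_t aggr_est_update(struct aggr_est *a, uint64_t now_us,
				uint32_t newly_acked, double bw_pps,
				uint32_t cwnd)
{
	/* How much data we expected to be ACKed at bw over this epoch. */
	uint32_t expected = (uint32_t)(bw_pps *
				       (now_us - a->epoch_start_us) / 1e6);

	/* ACKs arriving no faster than bw: restart the sampling epoch. */
	if (a->epoch_acked <= expected) {
		a->epoch_acked = 0;
		a->epoch_start_us = now_us;
		expected = 0;
	}
	a->epoch_acked += newly_acked;

	/* Surplus beyond expectation, sanity-capped by cwnd. */
	uint32_t extra = a->epoch_acked - expected;
	if (extra > cwnd)
		extra = cwnd;
	if (extra > a->extra_acked[a->win_idx])
		a->extra_acked[a->win_idx] = extra;

	/* Max over the two most recent windows. */
	return a->extra_acked[0] > a->extra_acked[1] ?
	       a->extra_acked[0] : a->extra_acked[1];
}

The kernel version in the diff below additionally caps epoch_acked at its
20-bit field and restarts the epoch as the count approaches
bbr_ack_epoch_acked_reset_thresh (1 << 20), so a long-lived epoch cannot
overflow the counter.
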
In our internal testing, we observed a significant increase in BBR
throughput (measured using netperf) in a basic wifi setup.
- Host1 (sender on ethernet) -> AP -> Host2 (receiver on wifi)
- 2.4 GHz -> BBR before: ~73 Mbps; BBR after: ~102 Mbps; CUBIC: ~100 Mbps
- 5.0 GHz -> BBR before: ~362 Mbps; BBR after: ~593 Mbps; CUBIC: ~601 Mbps

Also, this code is running globally on YouTube TCP connections and produced
significant bandwidth increases for YouTube traffic.

This is based on Ian Swett's max_ack_height_ algorithm from the
QUIC BBR implementation.

Signed-off-by: Priyaranjan Jha <priyarjha@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
include/net/inet_connection_sock.h | 4 +-
net/ipv4/tcp_bbr.c | 122 ++++++++++++++++++++++++++++++++++++-
2 files changed, 123 insertions(+), 3 deletions(-)

diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 371b3b4..fe0d9b4 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -136,8 +136,8 @@
} icsk_mtup;
u32 icsk_user_timeout;

- u64 icsk_ca_priv[88 / sizeof(u64)];
-#define ICSK_CA_PRIV_SIZE (11 * sizeof(u64))
+ u64 icsk_ca_priv[104 / sizeof(u64)];
+#define ICSK_CA_PRIV_SIZE (13 * sizeof(u64))
};

#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
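Why 88 bytes becomes 104: the ACK-aggregation state added to struct bbr in
the next diff occupies exactly two more u64 slots. A sketch of the
accounting (the struct name here is illustrative):

#include <stdint.h>

struct aggr_fields {                 /* the fields this patch adds */
	uint64_t ack_epoch_mstamp;   /*  8 bytes */
	uint16_t extra_acked[2];     /*  4 bytes */
	uint32_t bitfields;          /* 20 + 5 + 1 + 6 bits = 4 bytes */
};                                   /* 16 bytes = 2 * sizeof(u64) */

tcp_bbr.c enforces the fit at build time with
BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE) in its module
registration path, which is why the ca_priv scratch space must grow from
11 to 13 u64s.
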
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 6b6c7f14..56be7d2 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -117,6 +117,15 @@
unused_b:5;
u32 prior_cwnd; /* prior cwnd upon entering loss recovery */
u32 full_bw; /* recent bw, to estimate if pipe is full */
+
+
+ /* For tracking ACK aggregation: */
+ u64 ack_epoch_mstamp; /* start of ACK sampling epoch */
+ u16 extra_acked[2]; /* max excess data ACKed in epoch */
+ u32 ack_epoch_acked:20, /* packets (S)ACKed in sampling epoch */
+ extra_acked_win_rtts:5, /* age of extra_acked, in round trips */
+ extra_acked_win_idx:1, /* current index in extra_acked array */
+ unused_c:6;
};

#define CYCLE_LEN 8 /* number of phases in a pacing gain cycle */
@@ -176,6 +185,15 @@
/* If we estimate we're policed, use lt_bw for this many round trips: */
static const u32 bbr_lt_bw_max_rtts = 48;

+/* Gain factor for adding extra_acked to target cwnd: */
+static const int bbr_extra_acked_gain = BBR_UNIT;
+/* Window length of extra_acked window. */
+static const u32 bbr_extra_acked_win_rtts = 5;
+/* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */
+static const u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20;
+/* Time period for clamping cwnd increment due to ack aggregation */
+static const u32 bbr_extra_acked_max_us = 100 * 1000;
+
/* Do we estimate that STARTUP filled the pipe? */
static bool bbr_full_bw_reached(const struct sock *sk)
{
@@ -200,6 +218,16 @@
return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
}

+/* Return maximum extra acked in past k-2k round trips,
+ * where k = bbr_extra_acked_win_rtts.
+ */
+static u16 bbr_extra_acked(const struct sock *sk)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ return max(bbr->extra_acked[0], bbr->extra_acked[1]);
+}
+
/* Return rate in bytes per second, optionally with a gain.
* The order here is chosen carefully to avoid overflow of u64. This should
* work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
@@ -299,6 +327,8 @@

if (event == CA_EVENT_TX_START && tp->app_limited) {
bbr->idle_restart = 1;
+ bbr->ack_epoch_mstamp = tp->tcp_mstamp;
+ bbr->ack_epoch_acked = 0;
/* Avoid pointless buffer overflows: pace at est. bw if we don't
* need more speed (we're restarting from idle and app-limited).
*/
@@ -366,6 +396,22 @@
return cwnd;
}

+/* Find the cwnd increment based on estimate of ack aggregation */
+static u32 bbr_ack_aggregation_cwnd(struct sock *sk)
+{
+ u32 max_aggr_cwnd, aggr_cwnd = 0;
+
+ if (bbr_extra_acked_gain && bbr_full_bw_reached(sk)) {
+ max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us)
+ / BW_UNIT;
+ aggr_cwnd = (bbr_extra_acked_gain * bbr_extra_acked(sk))
+ >> BBR_SCALE;
+ aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd);
+ }
+
+ return aggr_cwnd;
+}
+
/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
{
@@ -443,8 +489,14 @@
if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
goto done;

- /* If we're below target cwnd, slow start cwnd toward target cwnd. */
target_cwnd = bbr_bdp(sk, bw, gain);
+
+ /* Increment the cwnd to account for excess ACKed data that seems
+ * due to aggregation (of data and/or ACKs) visible in the ACK stream.
+ */
+ target_cwnd += bbr_ack_aggregation_cwnd(sk);
+
+ /* If we're below target cwnd, slow start cwnd toward target cwnd. */
target_cwnd = bbr_quantization_budget(sk, target_cwnd, gain);
if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */
cwnd = min(cwnd + acked, target_cwnd);
@@ -715,6 +767,67 @@
}
}

+/* Estimates the windowed max degree of ack aggregation.
+ * This is used to provision extra in-flight data to keep sending during
+ * inter-ACK silences.
+ *
+ * Degree of ack aggregation is estimated as extra data acked beyond expected.
+ *
+ * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval"
+ * cwnd += max_extra_acked
+ *
+ * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
+ * Max filter is an approximate sliding window of 5-10 (packet timed) round
+ * trips.
+ */
+static void bbr_update_ack_aggregation(struct sock *sk,
+ const struct rate_sample *rs)
+{
+ u32 epoch_us, expected_acked, extra_acked;
+ struct bbr *bbr = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (!bbr_extra_acked_gain || rs->acked_sacked <= 0 ||
+ rs->delivered < 0 || rs->interval_us <= 0)
+ return;
+
+ if (bbr->round_start) {
+ bbr->extra_acked_win_rtts = min(0x1F,
+ bbr->extra_acked_win_rtts + 1);
+ if (bbr->extra_acked_win_rtts >= bbr_extra_acked_win_rtts) {
+ bbr->extra_acked_win_rtts = 0;
+ bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?
+ 0 : 1;
+ bbr->extra_acked[bbr->extra_acked_win_idx] = 0;
+ }
+ }
+
+ /* Compute how many packets we expected to be delivered over epoch. */
+ epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp,
+ bbr->ack_epoch_mstamp);
+ expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT;
+
+ /* Reset the aggregation epoch if ACK rate is below expected rate or
+ * significantly large no. of ack received since epoch (potentially
+ * quite old epoch).
+ */
+ if (bbr->ack_epoch_acked <= expected_acked ||
+ (bbr->ack_epoch_acked + rs->acked_sacked >=
+ bbr_ack_epoch_acked_reset_thresh)) {
+ bbr->ack_epoch_acked = 0;
+ bbr->ack_epoch_mstamp = tp->delivered_mstamp;
+ expected_acked = 0;
+ }
+
+ /* Compute excess data delivered, beyond what was expected. */
+ bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
+ bbr->ack_epoch_acked + rs->acked_sacked);
+ extra_acked = bbr->ack_epoch_acked - expected_acked;
+ extra_acked = min(extra_acked, tp->snd_cwnd);
+ if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
+ bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
+}
+
/* Estimate when the pipe is full, using the change in delivery rate: BBR
* estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
* at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
@@ -831,6 +944,7 @@
static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
{
bbr_update_bw(sk, rs);
+ bbr_update_ack_aggregation(sk, rs);
bbr_update_cycle_phase(sk, rs);
bbr_check_full_bw_reached(sk, rs);
bbr_check_drain(sk, rs);
@@ -883,6 +997,13 @@
bbr_reset_lt_bw_sampling(sk);
bbr_reset_startup_mode(sk);

+ bbr->ack_epoch_mstamp = tp->tcp_mstamp;
+ bbr->ack_epoch_acked = 0;
+ bbr->extra_acked_win_rtts = 0;
+ bbr->extra_acked_win_idx = 0;
+ bbr->extra_acked[0] = 0;
+ bbr->extra_acked[1] = 0;
+
cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
}


--
cgit v1.1

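Plugging hypothetical numbers into bbr_ack_aggregation_cwnd() from the
patch above shows how its two clamps interact:

/* Assume bbr_bw() corresponds to 50,000 pkts/sec, the filter reads
 * extra_acked = 300 packets, and bbr_extra_acked_gain = BBR_UNIT (1.0):
 *
 *   max_aggr_cwnd = bw * bbr_extra_acked_max_us = 50000 * 0.100 s
 *                 = 5000 packets
 *   aggr_cwnd     = (1.0 * 300) capped at 5000  = 300 packets
 *
 * So the cwnd target grows by 300 packets; the bw * 100 ms ceiling only
 * bites when the aggregation estimate itself is implausibly large. */
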
@@ -1,417 +0,0 @@
From 232aa8ec3ed979d4716891540c03a806ecab0c37 Mon Sep 17 00:00:00 2001
From: Priyaranjan Jha <priyarjha@google.com>
Date: Wed, 23 Jan 2019 12:04:53 -0800
Subject: tcp_bbr: refactor bbr_target_cwnd() for general inflight provisioning

Because bbr_target_cwnd() is really a general-purpose BBR helper for
computing some volume of inflight data as a function of the estimated
BDP, refactor it into the following helper functions:
- bbr_bdp()
- bbr_quantization_budget()
- bbr_inflight()

Signed-off-by: Priyaranjan Jha <priyarjha@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
net/ipv4/tcp_bbr.c | 60 +++++++++++++++++++++++++++++++++++-------------------
1 file changed, 39 insertions(+), 21 deletions(-)

diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 0f497fc..6b6c7f14 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -315,30 +315,19 @@
}
}

-/* Find target cwnd. Right-size the cwnd based on min RTT and the
- * estimated bottleneck bandwidth:
+/* Calculate bdp based on min RTT and the estimated bottleneck bandwidth:
*
- * cwnd = bw * min_rtt * gain = BDP * gain
+ * bdp = bw * min_rtt * gain
*
* The key factor, gain, controls the amount of queue. While a small gain
* builds a smaller queue, it becomes more vulnerable to noise in RTT
* measurements (e.g., delayed ACKs or other ACK compression effects). This
* noise may cause BBR to under-estimate the rate.
- *
- * To achieve full performance in high-speed paths, we budget enough cwnd to
- * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
- * - one skb in sending host Qdisc,
- * - one skb in sending host TSO/GSO engine
- * - one skb being received by receiver host LRO/GRO/delayed-ACK engine
- * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
- * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
- * which allows 2 outstanding 2-packet sequences, to try to keep pipe
- * full even with ACK-every-other-packet delayed ACKs.
*/
-static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
+static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
{
struct bbr *bbr = inet_csk_ca(sk);
- u32 cwnd;
+ u32 bdp;
u64 w;

/* If we've never had a valid RTT sample, cap cwnd at the initial
@@ -353,7 +342,24 @@
w = (u64)bw * bbr->min_rtt_us;

/* Apply a gain to the given value, then remove the BW_SCALE shift. */
- cwnd = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
+ bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
+
+ return bdp;
+}
+
+/* To achieve full performance in high-speed paths, we budget enough cwnd to
+ * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
+ * - one skb in sending host Qdisc,
+ * - one skb in sending host TSO/GSO engine
+ * - one skb being received by receiver host LRO/GRO/delayed-ACK engine
+ * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
+ * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
+ * which allows 2 outstanding 2-packet sequences, to try to keep pipe
+ * full even with ACK-every-other-packet delayed ACKs.
+ */
+static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd, int gain)
+{
+ struct bbr *bbr = inet_csk_ca(sk);

/* Allow enough full-sized skbs in flight to utilize end systems. */
cwnd += 3 * bbr_tso_segs_goal(sk);
@@ -368,6 +374,17 @@
return cwnd;
}

+/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
+static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
+{
+ u32 inflight;
+
+ inflight = bbr_bdp(sk, bw, gain);
+ inflight = bbr_quantization_budget(sk, inflight, gain);
+
+ return inflight;
+}
+
/* An optimization in BBR to reduce losses: On the first round of recovery, we
* follow the packet conservation principle: send P packets per P packets acked.
* After that, we slow-start and send at most 2*P packets per P packets acked.
@@ -429,7 +446,8 @@
goto done;

/* If we're below target cwnd, slow start cwnd toward target cwnd. */
- target_cwnd = bbr_target_cwnd(sk, bw, gain);
+ target_cwnd = bbr_bdp(sk, bw, gain);
+ target_cwnd = bbr_quantization_budget(sk, target_cwnd, gain);
if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */
cwnd = min(cwnd + acked, target_cwnd);
else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
@@ -470,14 +488,14 @@
if (bbr->pacing_gain > BBR_UNIT)
return is_full_length &&
(rs->losses || /* perhaps pacing_gain*BDP won't fit */
- inflight >= bbr_target_cwnd(sk, bw, bbr->pacing_gain));
+ inflight >= bbr_inflight(sk, bw, bbr->pacing_gain));

/* A pacing_gain < 1.0 tries to drain extra queue we added if bw
* probing didn't find more bw. If inflight falls to match BDP then we
* estimate queue is drained; persisting would underutilize the pipe.
*/
return is_full_length ||
- inflight <= bbr_target_cwnd(sk, bw, BBR_UNIT);
+ inflight <= bbr_inflight(sk, bw, BBR_UNIT);
}

static void bbr_advance_cycle_phase(struct sock *sk)
@@ -736,11 +754,11 @@
bbr->pacing_gain = bbr_drain_gain; /* pace slow to drain */
bbr->cwnd_gain = bbr_high_gain; /* maintain cwnd */
tcp_sk(sk)->snd_ssthresh =
- bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT);
+ bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
} /* fall through to check if in-flight is already small: */
if (bbr->mode == BBR_DRAIN &&
tcp_packets_in_flight(tcp_sk(sk)) <=
- bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT))
+ bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */
}


--
cgit v1.1

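One detail worth noting in this second variant of the refactor (for the
newer kernel branch): the quantization budget calls bbr_tso_segs_goal(sk)
rather than reading the cached bbr->tso_segs_goal field. The budget itself
is easy to work through with hypothetical numbers:

/* With a bdp of 10 packets and a TSO goal of 2 segments:
 *   cwnd = 10 + 3 * 2 = 16 packets   (one skb budgeted for the sender's
 *                                     Qdisc, one for TSO/GSO, one for
 *                                     receiver LRO/GRO/delayed-ACK)
 * and the 4-packet minimum cwnd mentioned in the comment keeps two
 * 2-packet ACK sequences in flight even when bdp is tiny. */
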
From 78dc70ebaa38aa303274e333be6c98eef87619e2 Mon Sep 17 00:00:00 2001
From: Priyaranjan Jha <priyarjha@google.com>
Date: Wed, 23 Jan 2019 12:04:54 -0800
Subject: tcp_bbr: adapt cwnd based on ack aggregation estimation

Aggregation effects are extremely common with wifi, cellular, and cable
modem link technologies, ACK decimation in middleboxes, and LRO and GRO
in receiving hosts. The aggregation can happen in either direction,
data or ACKs, but in either case the aggregation effect is visible
to the sender in the ACK stream.

Previously BBR's sending was often limited by cwnd under severe ACK
aggregation/decimation because BBR sized the cwnd at 2*BDP. If packets
were acked in bursts after long delays (e.g. one ACK acking 5*BDP after
5*RTT), BBR's sending was halted after sending 2*BDP over 2*RTT, leaving
the bottleneck idle for potentially long periods. Note that loss-based
congestion control does not have this issue because when facing
aggregation it continues increasing cwnd after bursts of ACKs, growing
cwnd until the buffer is full.

To achieve good throughput in the presence of aggregation effects, this
algorithm allows the BBR sender to put extra data in flight to keep the
bottleneck utilized during silences in the ACK stream that it has evidence
to suggest were caused by aggregation.

A summary of the algorithm: when a burst of packets is acked by a
stretched ACK or a burst of ACKs or both, BBR first estimates the expected
amount of data that should have been acked, based on its estimated
bandwidth. Then the surplus ("extra_acked") is recorded in a windowed-max
filter to estimate the recent level of observed ACK aggregation. Then cwnd
is increased by the ACK aggregation estimate. The larger cwnd avoids BBR
being cwnd-limited in the face of ACK silences that recent history suggests
were caused by aggregation. As a sanity check, the ACK aggregation degree
is upper-bounded by the cwnd (at the time of measurement) and a global max
of BW * 100ms. The algorithm is further described by the following
presentation:
https://datatracker.ietf.org/meeting/101/materials/slides-101-iccrg-an-update-on-bbr-work-at-google-00

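The windowed-max filter mentioned above is worth unpacking: the kernel keeps
just two buckets (extra_acked[0] and extra_acked[1]) and rotates them every
bbr_extra_acked_win_rtts round trips, which yields a sliding max over
roughly k to 2k rounds. A minimal sketch of that rotation, with illustrative
names rather than the kernel's:

#include <stdint.h>

#define WIN_RTTS 5  /* bbr_extra_acked_win_rtts */

struct win_max {
	uint16_t bucket[2];  /* per-window maxima */
	uint8_t  age;        /* rounds since the current bucket opened */
	uint8_t  idx;        /* index of the current bucket */
};

/* Call once per round trip, before recording new samples. */
static void win_max_rotate(struct win_max *w)
{
	if (++w->age >= WIN_RTTS) {
		w->age = 0;
		w->idx ^= 1;           /* flip to the other bucket */
		w->bucket[w->idx] = 0; /* start a fresh window */
	}
}

/* Record a sample; the filter's value is max(bucket[0], bucket[1]). */
static void win_max_sample(struct win_max *w, uint16_t v)
{
	if (v > w->bucket[w->idx])
		w->bucket[w->idx] = v;
}

Because the older bucket keeps its maximum until the next rotation, reading
max(bucket[0], bucket[1]) always covers between WIN_RTTS and 2*WIN_RTTS
rounds of history, matching the "past k-2k round trips" comment on
bbr_extra_acked(). The kernel additionally clamps the age counter to fit
its 5-bit field.
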
In our internal testing, we observed a significant increase in BBR
throughput (measured using netperf) in a basic wifi setup.
- Host1 (sender on ethernet) -> AP -> Host2 (receiver on wifi)
- 2.4 GHz -> BBR before: ~73 Mbps; BBR after: ~102 Mbps; CUBIC: ~100 Mbps
- 5.0 GHz -> BBR before: ~362 Mbps; BBR after: ~593 Mbps; CUBIC: ~601 Mbps

Also, this code is running globally on YouTube TCP connections and produced
significant bandwidth increases for YouTube traffic.

This is based on Ian Swett's max_ack_height_ algorithm from the
QUIC BBR implementation.

Signed-off-by: Priyaranjan Jha <priyarjha@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
include/net/inet_connection_sock.h | 4 +-
net/ipv4/tcp_bbr.c | 122 ++++++++++++++++++++++++++++++++++++-
2 files changed, 123 insertions(+), 3 deletions(-)

diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 371b3b4..fe0d9b4 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -139,8 +139,8 @@ struct inet_connection_sock {
} icsk_mtup;
u32 icsk_user_timeout;

- u64 icsk_ca_priv[88 / sizeof(u64)];
-#define ICSK_CA_PRIV_SIZE (11 * sizeof(u64))
+ u64 icsk_ca_priv[104 / sizeof(u64)];
+#define ICSK_CA_PRIV_SIZE (13 * sizeof(u64))
};

#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 6b6c7f14..56be7d2 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -115,6 +115,14 @@ struct bbr {
unused_b:5;
u32 prior_cwnd; /* prior cwnd upon entering loss recovery */
u32 full_bw; /* recent bw, to estimate if pipe is full */
+
+ /* For tracking ACK aggregation: */
+ u64 ack_epoch_mstamp; /* start of ACK sampling epoch */
+ u16 extra_acked[2]; /* max excess data ACKed in epoch */
+ u32 ack_epoch_acked:20, /* packets (S)ACKed in sampling epoch */
+ extra_acked_win_rtts:5, /* age of extra_acked, in round trips */
+ extra_acked_win_idx:1, /* current index in extra_acked array */
+ unused_c:6;
};

#define CYCLE_LEN 8 /* number of phases in a pacing gain cycle */
@@ -182,6 +190,15 @@ static const u32 bbr_lt_bw_diff = 4000 / 8;
/* If we estimate we're policed, use lt_bw for this many round trips: */
static const u32 bbr_lt_bw_max_rtts = 48;

+/* Gain factor for adding extra_acked to target cwnd: */
+static const int bbr_extra_acked_gain = BBR_UNIT;
+/* Window length of extra_acked window. */
+static const u32 bbr_extra_acked_win_rtts = 5;
+/* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */
+static const u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20;
+/* Time period for clamping cwnd increment due to ack aggregation */
+static const u32 bbr_extra_acked_max_us = 100 * 1000;
+
static void bbr_check_probe_rtt_done(struct sock *sk);

/* Do we estimate that STARTUP filled the pipe? */
@@ -208,6 +225,16 @@ static u32 bbr_bw(const struct sock *sk)
return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
}

+/* Return maximum extra acked in past k-2k round trips,
+ * where k = bbr_extra_acked_win_rtts.
+ */
+static u16 bbr_extra_acked(const struct sock *sk)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ return max(bbr->extra_acked[0], bbr->extra_acked[1]);
+}
+
/* Return rate in bytes per second, optionally with a gain.
* The order here is chosen carefully to avoid overflow of u64. This should
* work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
@@ -305,6 +332,8 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)

if (event == CA_EVENT_TX_START && tp->app_limited) {
bbr->idle_restart = 1;
+ bbr->ack_epoch_mstamp = tp->tcp_mstamp;
+ bbr->ack_epoch_acked = 0;
/* Avoid pointless buffer overflows: pace at est. bw if we don't
* need more speed (we're restarting from idle and app-limited).
*/
@@ -418,6 +447,22 @@ static u32 bbr_packets_in_net_at_edt(struct sock *sk, u32 inflight_now)
return inflight_at_edt - interval_delivered;
}

+/* Find the cwnd increment based on estimate of ack aggregation */
+static u32 bbr_ack_aggregation_cwnd(struct sock *sk)
+{
+ u32 max_aggr_cwnd, aggr_cwnd = 0;
+
+ if (bbr_extra_acked_gain && bbr_full_bw_reached(sk)) {
+ max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us)
+ / BW_UNIT;
+ aggr_cwnd = (bbr_extra_acked_gain * bbr_extra_acked(sk))
+ >> BBR_SCALE;
+ aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd);
+ }
+
+ return aggr_cwnd;
+}
+
/* An optimization in BBR to reduce losses: On the first round of recovery, we
* follow the packet conservation principle: send P packets per P packets acked.
* After that, we slow-start and send at most 2*P packets per P packets acked.
@@ -478,9 +523,15 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
goto done;

- /* If we're below target cwnd, slow start cwnd toward target cwnd. */
target_cwnd = bbr_bdp(sk, bw, gain);
+
+ /* Increment the cwnd to account for excess ACKed data that seems
+ * due to aggregation (of data and/or ACKs) visible in the ACK stream.
+ */
+ target_cwnd += bbr_ack_aggregation_cwnd(sk);
+
+ /* If we're below target cwnd, slow start cwnd toward target cwnd. */
target_cwnd = bbr_quantization_budget(sk, target_cwnd, gain);
if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */
cwnd = min(cwnd + acked, target_cwnd);
else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
@@ -745,6 +796,67 @@ static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
}
}

+/* Estimates the windowed max degree of ack aggregation.
+ * This is used to provision extra in-flight data to keep sending during
+ * inter-ACK silences.
+ *
+ * Degree of ack aggregation is estimated as extra data acked beyond expected.
+ *
+ * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval"
+ * cwnd += max_extra_acked
+ *
+ * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
+ * Max filter is an approximate sliding window of 5-10 (packet timed) round
+ * trips.
+ */
+static void bbr_update_ack_aggregation(struct sock *sk,
+ const struct rate_sample *rs)
+{
+ u32 epoch_us, expected_acked, extra_acked;
+ struct bbr *bbr = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (!bbr_extra_acked_gain || rs->acked_sacked <= 0 ||
+ rs->delivered < 0 || rs->interval_us <= 0)
+ return;
+
+ if (bbr->round_start) {
+ bbr->extra_acked_win_rtts = min(0x1F,
+ bbr->extra_acked_win_rtts + 1);
+ if (bbr->extra_acked_win_rtts >= bbr_extra_acked_win_rtts) {
+ bbr->extra_acked_win_rtts = 0;
+ bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?
+ 0 : 1;
+ bbr->extra_acked[bbr->extra_acked_win_idx] = 0;
+ }
+ }
+
+ /* Compute how many packets we expected to be delivered over epoch. */
+ epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp,
+ bbr->ack_epoch_mstamp);
+ expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT;
+
+ /* Reset the aggregation epoch if ACK rate is below expected rate or
+ * significantly large no. of ack received since epoch (potentially
+ * quite old epoch).
+ */
+ if (bbr->ack_epoch_acked <= expected_acked ||
+ (bbr->ack_epoch_acked + rs->acked_sacked >=
+ bbr_ack_epoch_acked_reset_thresh)) {
+ bbr->ack_epoch_acked = 0;
+ bbr->ack_epoch_mstamp = tp->delivered_mstamp;
+ expected_acked = 0;
+ }
+
+ /* Compute excess data delivered, beyond what was expected. */
+ bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
+ bbr->ack_epoch_acked + rs->acked_sacked);
+ extra_acked = bbr->ack_epoch_acked - expected_acked;
+ extra_acked = min(extra_acked, tp->snd_cwnd);
+ if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
+ bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
+}
+
/* Estimate when the pipe is full, using the change in delivery rate: BBR
* estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
* at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
@@ -899,6 +1011,7 @@ static void bbr_update_gains(struct sock *sk)
static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
{
bbr_update_bw(sk, rs);
+ bbr_update_ack_aggregation(sk, rs);
bbr_update_cycle_phase(sk, rs);
bbr_check_full_bw_reached(sk, rs);
bbr_check_drain(sk, rs);
@@ -950,6 +1063,13 @@ static void bbr_init(struct sock *sk)
bbr_reset_lt_bw_sampling(sk);
bbr_reset_startup_mode(sk);

+ bbr->ack_epoch_mstamp = tp->tcp_mstamp;
+ bbr->ack_epoch_acked = 0;
+ bbr->extra_acked_win_rtts = 0;
+ bbr->extra_acked_win_idx = 0;
+ bbr->extra_acked[0] = 0;
+ bbr->extra_acked[1] = 0;
+
cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
}


--
cgit v1.1